Refactor logging, improve error handling workflow, clang-format. (#3385)

Daniel Patterson 2016-12-06 12:30:46 -08:00 committed by GitHub
parent 6f4c6e84ae
commit 468d8c0031
62 changed files with 1778 additions and 1607 deletions


@@ -56,6 +56,8 @@ set(OSRM_VERSION_MINOR 5)
set(OSRM_VERSION_PATCH 0)
set(OSRM_VERSION "${OSRM_VERSION_MAJOR}.${OSRM_VERSION_MINOR}.${OSRM_VERSION_PATCH}")
+add_definitions(-DOSRM_PROJECT_DIR="${CMAKE_CURRENT_SOURCE_DIR}")
# these two functions build up custom variables:
# DEPENDENCIES_INCLUDE_DIRS and OSRM_DEFINES
# These variables we want to pass to


@@ -6,8 +6,8 @@
#include "util/deallocating_vector.hpp"
#include "util/dynamic_graph.hpp"
#include "util/integer_range.hpp"
+#include "util/log.hpp"
#include "util/percent.hpp"
-#include "util/simple_logger.hpp"
#include "util/timing_util.hpp"
#include "util/typedefs.hpp"
#include "util/xor_fast_hash.hpp"
@@ -156,11 +156,10 @@ class GraphContractor
#ifndef NDEBUG
if (static_cast<unsigned int>(std::max(diter->weight, 1)) > 24 * 60 * 60 * 10)
{
-util::SimpleLogger().Write(logWARNING)
-    << "Edge weight large -> "
-    << static_cast<unsigned int>(std::max(diter->weight, 1)) << " : "
-    << static_cast<unsigned int>(diter->source) << " -> "
-    << static_cast<unsigned int>(diter->target);
+util::Log(logWARNING) << "Edge weight large -> "
+    << static_cast<unsigned int>(std::max(diter->weight, 1))
+    << " : " << static_cast<unsigned int>(diter->source) << " -> "
+    << static_cast<unsigned int>(diter->target);
}
#endif
edges.emplace_back(diter->source,
@@ -245,15 +244,14 @@ class GraphContractor
}
}
}
-util::SimpleLogger().Write() << "merged " << edges.size() - edge << " edges out of "
-    << edges.size();
+util::Log() << "merged " << edges.size() - edge << " edges out of " << edges.size();
edges.resize(edge);
contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
edges.clear();
edges.shrink_to_fit();
BOOST_ASSERT(0 == edges.capacity());
-util::SimpleLogger().Write() << "contractor finished initalization";
+util::Log() << "contractor finished initalization";
}
void Run(double core_factor = 1.0)
@@ -270,7 +268,6 @@ class GraphContractor
const constexpr size_t DeleteGrainSize = 1;
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
-util::Percent p(number_of_nodes);
ThreadDataContainer thread_data_list(number_of_nodes);
@@ -292,9 +289,10 @@ class GraphContractor
bool use_cached_node_priorities = !node_levels.empty();
if (use_cached_node_priorities)
{
-std::cout << "using cached node priorities ..." << std::flush;
+util::UnbufferedLog log;
+log << "using cached node priorities ...";
node_priorities.swap(node_levels);
-std::cout << "ok" << std::endl;
+log << "ok";
}
else
{
@@ -302,7 +300,8 @@ class GraphContractor
node_priorities.resize(number_of_nodes);
node_levels.resize(number_of_nodes);
-std::cout << "initializing elimination PQ ..." << std::flush;
+util::UnbufferedLog log;
+log << "initializing elimination PQ ...";
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, PQGrainSize),
[this, &node_priorities, &node_depth, &thread_data_list](
const tbb::blocked_range<int> &range) {
@@ -313,11 +312,14 @@ class GraphContractor
this->EvaluateNodePriority(data, node_depth[x], x);
}
});
-std::cout << "ok" << std::endl;
+log << "ok";
}
BOOST_ASSERT(node_priorities.size() == number_of_nodes);
-std::cout << "preprocessing " << number_of_nodes << " nodes ..." << std::flush;
+util::Log() << "preprocessing " << number_of_nodes << " nodes ...";
+util::UnbufferedLog log;
+util::Percent p(log, number_of_nodes);
unsigned current_level = 0;
bool flushed_contractor = false;
@@ -331,7 +333,7 @@ class GraphContractor
new_edge_set; // this one is not explicitely
// cleared since it goes out of
// scope anywa
-std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush;
+log << " [flush " << number_of_contracted_nodes << " nodes] ";
// Delete old heap data to free memory that we need for the coming operations
thread_data_list.data.clear();
@@ -599,9 +601,8 @@ class GraphContractor
is_core_node.clear();
}
-util::SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes "
-    << contractor_graph->GetNumberOfEdges() << " edges."
-    << std::endl;
+util::Log() << "[core] " << remaining_nodes.size() << " nodes "
+    << contractor_graph->GetNumberOfEdges() << " edges.";
thread_data_list.data.clear();
}
@@ -618,8 +619,9 @@ class GraphContractor
template <class Edge> inline void GetEdges(util::DeallocatingVector<Edge> &edges)
{
-util::Percent p(contractor_graph->GetNumberOfNodes());
-util::SimpleLogger().Write() << "Getting edges of minimized graph";
+util::UnbufferedLog log;
+log << "Getting edges of minimized graph ";
+util::Percent p(log, contractor_graph->GetNumberOfNodes());
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
if (contractor_graph->GetNumberOfNodes())
{


@@ -12,11 +12,13 @@
#include "util/guidance/turn_lanes.hpp"
#include "engine/geospatial_query.hpp"
+#include "util/exception.hpp"
+#include "util/exception_utils.hpp"
#include "util/guidance/turn_bearing.hpp"
+#include "util/log.hpp"
#include "util/packed_vector.hpp"
#include "util/range_table.hpp"
#include "util/rectangle.hpp"
-#include "util/simple_logger.hpp"
#include "util/static_graph.hpp"
#include "util/static_rtree.hpp"
#include "util/typedefs.hpp"
@@ -116,7 +118,7 @@ class ContiguousInternalMemoryDataFacadeBase : public BaseDataFacade
{
m_check_sum =
*data_layout.GetBlockPtr<unsigned>(memory_block, storage::DataLayout::HSGR_CHECKSUM);
-util::SimpleLogger().Write() << "set checksum: " << m_check_sum;
+util::Log() << "set checksum: " << m_check_sum;
}
void InitializeProfilePropertiesPointer(storage::DataLayout &data_layout, char *memory_block)
@@ -144,9 +146,9 @@ class ContiguousInternalMemoryDataFacadeBase : public BaseDataFacade
file_index_path = boost::filesystem::path(file_index_ptr);
if (!boost::filesystem::exists(file_index_path))
{
-util::SimpleLogger().Write(logDEBUG) << "Leaf file name " << file_index_path.string();
+util::Log(logDEBUG) << "Leaf file name " << file_index_path.string();
throw util::exception("Could not load " + file_index_path.string() +
-    "Is any data loaded into shared memory?");
+    "Is any data loaded into shared memory?" + SOURCE_REF);
}
auto tree_ptr =


@@ -56,8 +56,7 @@ class SharedMemoryDataFacade : public ContiguousInternalMemoryDataFacadeBase
if (current_timestamp->timestamp == shared_timestamp)
{
-util::SimpleLogger().Write(logDEBUG) << "Retaining data with shared timestamp "
-    << shared_timestamp;
+util::Log(logDEBUG) << "Retaining data with shared timestamp " << shared_timestamp;
}
else
{
@@ -74,8 +73,7 @@
: shared_barriers(shared_barriers_), layout_region(layout_region_),
data_region(data_region_), shared_timestamp(shared_timestamp_)
{
-util::SimpleLogger().Write(logDEBUG) << "Loading new data with shared timestamp "
-    << shared_timestamp;
+util::Log(logDEBUG) << "Loading new data with shared timestamp " << shared_timestamp;
BOOST_ASSERT(storage::SharedMemory::RegionExists(layout_region));
m_layout_memory = storage::makeSharedMemory(layout_region);


@@ -18,6 +18,8 @@
#include "engine/plugins/trip.hpp"
#include "engine/plugins/viaroute.hpp"
#include "engine/status.hpp"
+#include "util/exception.hpp"
+#include "util/exception_utils.hpp"
#include "util/json_container.hpp"
#include <memory>


@@ -235,10 +235,10 @@ class AlternativeRouting final
}
}
-// util::SimpleLogger().Write(logDEBUG) << "fwd_search_space size: " <<
+// util::Log(logDEBUG) << "fwd_search_space size: " <<
// forward_search_space.size() << ", marked " << approximated_forward_sharing.size() << "
// nodes";
-// util::SimpleLogger().Write(logDEBUG) << "rev_search_space size: " <<
+// util::Log(logDEBUG) << "rev_search_space size: " <<
// reverse_search_space.size() << ", marked " << approximated_reverse_sharing.size() << "
// nodes";
@@ -601,7 +601,7 @@ class AlternativeRouting final
// //compute forward sharing
// while( (packed_alternate_path[aindex] == packed_shortest_path[aindex]) &&
// (packed_alternate_path[aindex+1] == packed_shortest_path[aindex+1]) ) {
-// // util::SimpleLogger().Write() << "retrieving edge (" <<
+// // util::Log() << "retrieving edge (" <<
// packed_alternate_path[aindex] << "," << packed_alternate_path[aindex+1] << ")";
// EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_alternate_path[aindex],
// packed_alternate_path[aindex+1]);
@@ -640,7 +640,7 @@ class AlternativeRouting final
const NodeID node = forward_heap.DeleteMin();
const int weight = forward_heap.GetKey(node);
// const NodeID parentnode = forward_heap.GetData(node).parent;
-// util::SimpleLogger().Write() << (is_forward_directed ? "[fwd] " : "[rev] ") << "settled
+// util::Log() << (is_forward_directed ? "[fwd] " : "[rev] ") << "settled
// edge ("
// << parentnode << "," << node << "), dist: " << weight;
@@ -665,11 +665,11 @@ class AlternativeRouting final
{
*middle_node = node;
*upper_bound_to_shortest_path_weight = new_weight;
-// util::SimpleLogger().Write() << "accepted middle_node " << *middle_node
+// util::Log() << "accepted middle_node " << *middle_node
// << " at
// weight " << new_weight;
// } else {
-// util::SimpleLogger().Write() << "discarded middle_node " << *middle_node
+// util::Log() << "discarded middle_node " << *middle_node
// << "
// at weight " << new_weight;
}


@@ -2,7 +2,7 @@
#define TRIP_BRUTE_FORCE_HPP
#include "util/dist_table_wrapper.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/typedefs.hpp"
#include "osrm/json_container.hpp"


@@ -2,7 +2,7 @@
#define TRIP_NEAREST_NEIGHBOUR_HPP
#include "util/dist_table_wrapper.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/typedefs.hpp"
#include "osrm/json_container.hpp"


@@ -12,7 +12,7 @@
#include <boost/functional/hash.hpp>
#include "util/json_container.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/typedefs.hpp"
namespace osrm


@@ -66,13 +66,14 @@ class RasterGrid
}
catch (std::exception const &ex)
{
-throw util::exception(
-    std::string("Failed to read from raster source with exception: ") + ex.what());
+throw util::exception("Failed to read from raster source " + filepath.string() + ": " +
+    ex.what() + SOURCE_REF);
}
if (!r || itr != end)
{
-throw util::exception("Failed to parse raster source correctly.");
+throw util::exception("Failed to parse raster source: " + filepath.string() +
+    SOURCE_REF);
}
}


@@ -8,7 +8,7 @@
#include "util/typedefs.hpp"
#include "util/integer_range.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/std_hash.hpp"
#include "util/timing_util.hpp"
@@ -146,8 +146,8 @@ template <typename GraphT> class TarjanSCC
if (size_of_current_component > 1000)
{
-util::SimpleLogger().Write() << "large component [" << component_index
-    << "]=" << size_of_current_component;
+util::Log() << "large component [" << component_index
+    << "]=" << size_of_current_component;
}
++component_index;
@@ -158,7 +158,7 @@
}
TIMER_STOP(SCC_RUN);
-util::SimpleLogger().Write() << "SCC run took: " << TIMER_MSEC(SCC_RUN) / 1000. << "s";
+util::Log() << "SCC run took: " << TIMER_MSEC(SCC_RUN) / 1000. << "s";
size_one_counter = std::count_if(component_size_vector.begin(),
component_size_vector.end(),


@@ -6,7 +6,7 @@
#include "server/service_handler.hpp"
#include "util/integer_range.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include <boost/asio.hpp>
#include <boost/bind.hpp>
@@ -36,8 +36,7 @@ class Server
static std::shared_ptr<Server>
CreateServer(std::string &ip_address, int ip_port, unsigned requested_num_threads)
{
-util::SimpleLogger().Write() << "http 1.1 compression handled by zlib version "
-    << zlibVersion();
+util::Log() << "http 1.1 compression handled by zlib version " << zlibVersion();
const unsigned hardware_threads = std::max(1u, std::thread::hardware_concurrency());
const unsigned real_num_threads = std::min(hardware_threads, requested_num_threads);
return std::make_shared<Server>(ip_address, ip_port, real_num_threads);
@@ -62,7 +61,7 @@ class Server
acceptor.bind(endpoint);
acceptor.listen();
-util::SimpleLogger().Write() << "Listening on: " << acceptor.local_endpoint();
+util::Log() << "Listening on: " << acceptor.local_endpoint();
acceptor.async_accept(
new_connection->socket(),


@@ -2,8 +2,9 @@
#define OSRM_STORAGE_IO_HPP_
#include "util/exception.hpp"
+#include "util/exception_utils.hpp"
#include "util/fingerprint.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include <boost/filesystem/fstream.hpp>
#include <boost/iostreams/seek.hpp>
@@ -55,7 +56,7 @@ class FileReader
if (flag == VerifyFingerprint && !ReadAndCheckFingerprint())
{
-throw util::exception("Fingerprint mismatch in " + filepath.string());
+throw util::exception("Fingerprint mismatch in " + filepath_.string() + SOURCE_REF);
}
}
@@ -77,9 +78,9 @@ class FileReader
if (result.eof())
{
throw util::exception("Error reading from " + filepath.string() +
-    ": Unexpected end of file");
+    ": Unexpected end of file " + SOURCE_REF);
}
-throw util::exception("Error reading from " + filepath.string());
+throw util::exception("Error reading from " + filepath.string() + " " + SOURCE_REF);
}
}


@@ -8,7 +8,7 @@
#include "storage/io.hpp"
#include "util/exception.hpp"
#include "util/fingerprint.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/static_graph.hpp"
#include <boost/filesystem/fstream.hpp>
@@ -44,8 +44,8 @@ inline HSGRHeader readHSGRHeader(io::FileReader &input_file)
const auto fingerprint_loaded = input_file.ReadOne<util::FingerPrint>();
if (!fingerprint_loaded.TestGraphUtil(fingerprint_valid))
{
-util::SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build.\n"
-    "Reprocess to get rid of this warning.";
+util::Log(logWARNING) << ".hsgr was prepared with different build.\n"
+    "Reprocess to get rid of this warning.";
}
HSGRHeader header;


@@ -2,7 +2,8 @@
#define SHARED_DATA_TYPE_HPP
#include "util/exception.hpp"
-#include "util/simple_logger.hpp"
+#include "util/exception_utils.hpp"
+#include "util/log.hpp"
#include <array>
#include <cstdint>
@@ -175,13 +176,13 @@ struct DataLayout
bool end_canary_alive = std::equal(CANARY, CANARY + sizeof(CANARY), end_canary_ptr);
if (!start_canary_alive)
{
-throw util::exception(std::string("Start canary of block corrupted. (") +
-    block_id_to_name[bid] + ")");
+throw util::exception("Start canary of block corrupted. (" +
+    std::string(block_id_to_name[bid]) + ")" + SOURCE_REF);
}
if (!end_canary_alive)
{
-throw util::exception(std::string("End canary of block corrupted. (") +
-    block_id_to_name[bid] + ")");
+throw util::exception("End canary of block corrupted. (" +
+    std::string(block_id_to_name[bid]) + ")" + SOURCE_REF);
}
}


@@ -2,7 +2,8 @@
#define SHARED_MEMORY_HPP
#include "util/exception.hpp"
-#include "util/simple_logger.hpp"
+#include "util/exception_utils.hpp"
+#include "util/log.hpp"
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
@@ -62,8 +63,7 @@ class SharedMemory
{
shm = boost::interprocess::xsi_shared_memory(boost::interprocess::open_only, key);
-util::SimpleLogger().Write(logDEBUG) << "opening " << shm.get_shmid() << " from id "
-    << id;
+util::Log(logDEBUG) << "opening " << shm.get_shmid() << " from id " << id;
region = boost::interprocess::mapped_region(shm, access);
}
@@ -72,14 +72,14 @@
{
shm = boost::interprocess::xsi_shared_memory(
boost::interprocess::open_or_create, key, size);
-util::SimpleLogger().Write(logDEBUG) << "opening/creating " << shm.get_shmid()
-    << " from id " << id << " with size " << size;
+util::Log(logDEBUG) << "opening/creating " << shm.get_shmid() << " from id " << id
+    << " with size " << size;
#ifdef __linux__
if (-1 == shmctl(shm.get_shmid(), SHM_LOCK, nullptr))
{
if (ENOMEM == errno)
{
-util::SimpleLogger().Write(logWARNING) << "could not lock shared memory to RAM";
+util::Log(logWARNING) << "could not lock shared memory to RAM";
}
}
#endif
@@ -133,7 +133,7 @@ class SharedMemory
static bool Remove(const boost::interprocess::xsi_key &key)
{
boost::interprocess::xsi_shared_memory xsi(boost::interprocess::open_only, key);
-util::SimpleLogger().Write(logDEBUG) << "deallocating prev memory " << xsi.get_shmid();
+util::Log(logDEBUG) << "deallocating prev memory " << xsi.get_shmid();
return boost::interprocess::xsi_shared_memory::remove(xsi.get_shmid());
}
@@ -173,8 +173,7 @@ class SharedMemory
shm.truncate(size);
region = boost::interprocess::mapped_region(shm, access);
-util::SimpleLogger().Write(logDEBUG) << "writeable memory allocated " << size
-    << " bytes";
+util::Log(logDEBUG) << "writeable memory allocated " << size << " bytes";
}
}
@@ -221,7 +220,7 @@ class SharedMemory
static bool Remove(char *key)
{
-util::SimpleLogger().Write(logDEBUG) << "deallocating prev memory for key " << key;
+util::Log(logDEBUG) << "deallocating prev memory for key " << key;
return boost::interprocess::shared_memory_object::remove(key);
}
@@ -242,7 +241,7 @@ makeSharedMemory(const IdentifierT &id, const uint64_t size = 0, bool read_write
{
if (0 == size)
{
-throw util::exception("lock file does not exist, exiting");
+throw util::exception("lock file does not exist, exiting" + SOURCE_REF);
}
else
{
@@ -253,9 +252,9 @@ makeSharedMemory(const IdentifierT &id, const uint64_t size = 0, bool read_write
}
catch (const boost::interprocess::interprocess_exception &e)
{
-util::SimpleLogger().Write(logWARNING) << "caught exception: " << e.what() << ", code "
-    << e.get_error_code();
-throw util::exception(e.what());
+util::Log(logERROR) << "Error while attempting to allocate shared memory: " << e.what()
+    << ", code " << e.get_error_code();
+throw util::exception(e.what() + SOURCE_REF);
}
}
}


@@ -0,0 +1,15 @@
#ifndef SOURCE_MACROS_HPP
#define SOURCE_MACROS_HPP
#include <cstring>
// Helper macros, don't use these ones
// STRIP the OSRM_PROJECT_DIR from the front of a filename. Expected to come
// from CMake's CURRENT_SOURCE_DIR, which doesn't have a trailing /, hence the +1
#define _PROJECT_RELATIVE_PATH(x) std::string(x).substr(strlen(OSRM_PROJECT_DIR) + 1)
// Return the path of a file, relative to the OSRM_PROJECT_DIR
#define _OSRM_SOURCE_FILE _PROJECT_RELATIVE_PATH(__FILE__)
// This is the macro to use
#define SOURCE_REF std::string(" (at ") + _OSRM_SOURCE_FILE + ":" + std::to_string(__LINE__) + ")"
#endif // SOURCE_MACROS_HPP
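
The macros above are what the "+ SOURCE_REF" additions throughout this diff rely on: the thrown message gets the project-relative file name and line number appended, with OSRM_PROJECT_DIR supplied by the add_definitions(-DOSRM_PROJECT_DIR=...) line added to the CMake file in the first hunk. A minimal self-contained sketch of the same pattern follows; it is not OSRM code, uses std::runtime_error instead of util::exception, and deliberately skips the project-prefix stripping (_PROJECT_RELATIVE_PATH) so it compiles anywhere:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Simplified SOURCE_REF: the real macro additionally strips OSRM_PROJECT_DIR from
    // the front of __FILE__ via _PROJECT_RELATIVE_PATH(__FILE__).
    #define SOURCE_REF std::string(" (at ") + __FILE__ + ":" + std::to_string(__LINE__) + ")"

    int main()
    {
        try
        {
            // Same call shape as the diff's `throw util::exception("..." + SOURCE_REF);`
            throw std::runtime_error("Invalid file paths given!" + SOURCE_REF);
        }
        catch (const std::exception &e)
        {
            // Prints something like: "Invalid file paths given! (at demo.cpp:14)"
            std::cout << e.what() << std::endl;
        }
    }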


@@ -1,4 +1,5 @@
#include "util/exception.hpp"
+#include "util/exception_utils.hpp"
#include <boost/uuid/name_generator.hpp>
#include <boost/uuid/uuid_generators.hpp>
@@ -57,7 +58,7 @@ bool FingerPrint::TestGraphUtil(const FingerPrint &other) const
{
if (!IsMagicNumberOK(other))
{
-throw exception("hsgr input file misses magic number. Check or reprocess the file");
+throw exception(std::string("hsgr input file misses magic number. Check or reprocess the file") + SOURCE_REF);
}
return std::equal(md5_graph, md5_graph + 32, other.md5_graph);
}
@@ -66,7 +67,7 @@ bool FingerPrint::TestContractor(const FingerPrint &other) const
{
if (!IsMagicNumberOK(other))
{
-throw exception("osrm input file misses magic number. Check or reprocess the file");
+throw exception(std::string("osrm input file misses magic number. Check or reprocess the file") + SOURCE_REF);
}
return std::equal(md5_prepare, md5_prepare + 32, other.md5_prepare);
}
@@ -75,7 +76,7 @@ bool FingerPrint::TestRTree(const FingerPrint &other) const
{
if (!IsMagicNumberOK(other))
{
-throw exception("r-tree input file misses magic number. Check or reprocess the file");
+throw exception(std::string("r-tree input file misses magic number. Check or reprocess the file") + SOURCE_REF);
}
return std::equal(md5_tree, md5_tree + 32, other.md5_tree);
}
@@ -84,7 +85,7 @@ bool FingerPrint::TestQueryObjects(const FingerPrint &other) const
{
if (!IsMagicNumberOK(other))
{
-throw exception("missing magic number. Check or reprocess the file");
+throw exception(std::string("missing magic number. Check or reprocess the file") + SOURCE_REF);
}
return std::equal(md5_objects, md5_objects + 32, other.md5_objects);
}


@@ -7,7 +7,7 @@
#include "util/json_container.hpp"
#include "util/json_renderer.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
namespace osrm
{
@@ -92,7 +92,7 @@ class GeojsonLogger
// out on log output. Such a sad life
if (ofs.is_open())
{
-util::SimpleLogger().Write(logWARNING)
-    << "Overwriting " << logfile
+util::Log(logWARNING)
+    << "Overwriting " << logfile
<< ". Is this desired behaviour? If this message occurs more than once rethink the "
"location of your Logger Guard.";


@@ -8,7 +8,7 @@
#include "storage/io.hpp"
#include "util/exception.hpp"
#include "util/fingerprint.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/typedefs.hpp"
#include <boost/assert.hpp>
@@ -60,7 +60,7 @@ NodeID loadNodesFromFile(storage::io::FileReader &file_reader,
std::vector<extractor::QueryNode> &node_array)
{
NodeID number_of_nodes = file_reader.ReadElementCount32();
-SimpleLogger().Write() << "Importing number_of_nodes new = " << number_of_nodes << " nodes ";
+Log() << "Importing number_of_nodes new = " << number_of_nodes << " nodes ";
node_array.resize(number_of_nodes);
@@ -99,14 +99,14 @@ inline NodeID loadEdgesFromFile(storage::io::FileReader &file_reader,
BOOST_ASSERT(sizeof(EdgeID) == sizeof(number_of_edges));
edge_list.resize(number_of_edges);
-SimpleLogger().Write() << " and " << number_of_edges << " edges ";
+Log() << " and " << number_of_edges << " edges ";
file_reader.ReadInto(edge_list.data(), number_of_edges);
BOOST_ASSERT(edge_list.size() > 0);
#ifndef NDEBUG
-SimpleLogger().Write() << "Validating loaded edges...";
+Log() << "Validating loaded edges...";
tbb::parallel_sort(
edge_list.begin(),
edge_list.end(),
@@ -129,7 +129,7 @@ inline NodeID loadEdgesFromFile(storage::io::FileReader &file_reader,
}
#endif
-SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges";
+Log() << "Graph loaded ok and has " << edge_list.size() << " edges";
return number_of_edges;
}


@@ -11,8 +11,8 @@
#include "util/attributes.hpp"
#include "util/guidance/bearing_class.hpp"
#include "util/guidance/entry_class.hpp"
+#include "util/log.hpp"
#include "util/name_table.hpp"
-#include "util/simple_logger.hpp"
#include <algorithm>
#include <string>


@@ -1,7 +1,7 @@
#ifndef OSRM_INCLUDE_UTIL_IO_HPP_
#define OSRM_INCLUDE_UTIL_IO_HPP_
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -125,9 +125,10 @@ void deserializeAdjacencyArray(const std::string &filename,
// offsets have to match up with the size of the data
if (offsets.empty() || (offsets.back() != boost::numeric_cast<std::uint32_t>(data.size())))
-throw util::exception("Error in " + filename + (offsets.empty()
-    ? "Offsets are empty"
-    : "Offset and data size do not match"));
+throw util::exception(
+    "Error in " + filename +
+    (offsets.empty() ? "Offsets are empty" : "Offset and data size do not match") +
+    SOURCE_REF);
}
inline bool serializeFlags(const boost::filesystem::path &path, const std::vector<bool> &flags)
@@ -153,8 +154,7 @@ inline bool serializeFlags(const boost::filesystem::path &path, const std::vecto
++chunk_count;
flag_stream.write(reinterpret_cast<const char *>(&chunk), sizeof(chunk));
}
-SimpleLogger().Write() << "Wrote " << number_of_bits << " bits in " << chunk_count
-    << " chunks (Flags).";
+Log() << "Wrote " << number_of_bits << " bits in " << chunk_count << " chunks (Flags).";
return static_cast<bool>(flag_stream);
}

include/util/log.hpp (new file)

@@ -0,0 +1,70 @@
#ifndef LOG_HPP
#define LOG_HPP
#include <atomic>
#include <mutex>
#include <sstream>
enum LogLevel
{
logINFO,
logWARNING,
logERROR,
logDEBUG
};
namespace osrm
{
namespace util
{
class LogPolicy
{
public:
void Unmute();
void Mute();
bool IsMute() const;
static LogPolicy &GetInstance();
LogPolicy(const LogPolicy &) = delete;
LogPolicy &operator=(const LogPolicy &) = delete;
private:
LogPolicy() : m_is_mute(true) {}
std::atomic<bool> m_is_mute;
};
class Log
{
public:
Log(LogLevel level_ = logINFO);
Log(LogLevel level_, std::ostream &ostream);
virtual ~Log();
std::mutex &get_mutex();
template <typename T> inline std::ostream &operator<<(const T &data) { return stream << data; }
protected:
LogLevel level;
std::ostringstream buffer;
std::ostream &stream;
};
/**
* Modified logger - this one doesn't buffer - it writes directly to stdout,
* and the final newline is only printed when the object is destructed.
* Useful for logging situations where you don't want to newline right away
*/
class UnbufferedLog : public Log
{
public:
UnbufferedLog(LogLevel level_ = logINFO);
};
}
}
#endif /* LOG_HPP */
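
include/util/log.hpp above replaces simple_logger.hpp: util::Log(level) collects a message via operator<< and emits it when the temporary object is destroyed, while util::UnbufferedLog (per its own comment) writes as it goes and only appends the final newline in its destructor, which is why the progress code in this diff can keep extending a single line. The implementation file (log.cpp) is not part of the excerpt shown here, so the following stand-alone sketch uses simplified stand-in types; it only illustrates the destructor-flush pattern that makes "util::Log(logWARNING) << ...;" print one complete line:

    #include <iostream>
    #include <sstream>
    #include <string>

    enum LogLevel { logINFO, logWARNING, logERROR, logDEBUG };

    // Stand-in, not OSRM's Log: buffers everything streamed into it and writes a
    // single prefixed line when the object goes out of scope.
    class Log
    {
      public:
        explicit Log(LogLevel level_ = logINFO) : level(level_) {}
        ~Log()
        {
            static const char *prefix[] = {"[info] ", "[warn] ", "[error] ", "[debug] "};
            std::cerr << prefix[level] << buffer.str() << std::endl;
        }
        template <typename T> Log &operator<<(const T &data)
        {
            buffer << data;
            return *this;
        }

      private:
        LogLevel level;
        std::ostringstream buffer;
    };

    int main()
    {
        Log(logWARNING) << "Edge weight large -> " << 123456; // same call shape as util::Log in the diff
        Log() << "contractor finished initialization";
    }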


@@ -5,6 +5,7 @@
#include <iostream>
#include "util/isatty.hpp"
+#include "util/log.hpp"
namespace osrm
{
@@ -13,8 +14,13 @@ namespace util
class Percent
{
+Log &log;
public:
-explicit Percent(unsigned max_value, unsigned step = 5) { Reinit(max_value, step); }
+explicit Percent(Log &log_, unsigned max_value, unsigned step = 5) : log{log_}
+{
+    Reinit(max_value, step);
+}
// Reinitializes
void Reinit(unsigned max_value, unsigned step = 5)
@@ -36,7 +42,7 @@ class Percent
PrintPercent(current_value / static_cast<double>(m_max_value) * 100.);
}
if (current_value + 1 == m_max_value)
-std::cout << " 100%" << std::endl;
+log << " 100%";
}
void PrintIncrement()
@@ -67,19 +73,17 @@ class Percent
m_last_percent += m_step;
if (m_last_percent % 10 == 0)
{
-std::cout << " " << m_last_percent << "% ";
+log << " " << m_last_percent << "% ";
}
else
{
-std::cout << ".";
+log << ".";
}
// When not on a TTY, print newlines after each progress indicator so
// so that progress is visible to line-buffered logging systems
if (!IsStdoutATTY())
-std::cout << std::endl;
-std::cout.flush();
+log << "" << std::endl;
}
}
};
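
After this change util::Percent no longer owns its output: it streams the dots and percentage markers into whatever Log/UnbufferedLog the caller hands it, as the contractor hunks above do when they create an UnbufferedLog and a Percent together. A self-contained sketch of that dependency-injection shape, using simplified stand-in types (not OSRM's) and a made-up 1000-iteration loop:

    #include <iostream>

    struct UnbufferedLog // stand-in: writes immediately, final newline in the destructor
    {
        ~UnbufferedLog() { std::cout << std::endl; }
        template <typename T> UnbufferedLog &operator<<(const T &value)
        {
            std::cout << value << std::flush;
            return *this;
        }
    };

    class Percent // stand-in progress printer that reports through an injected logger
    {
      public:
        Percent(UnbufferedLog &log_, unsigned max_value_, unsigned step_ = 5)
            : log(log_), max_value(max_value_), step(step_)
        {
        }
        void PrintIncrement()
        {
            ++current;
            const unsigned percent = 100u * current / max_value;
            while (last_reported + step <= percent)
            {
                last_reported += step;
                if (last_reported % 10 == 0)
                    log << " " << last_reported << "% ";
                else
                    log << ".";
            }
        }

      private:
        UnbufferedLog &log;
        unsigned max_value;
        unsigned step;
        unsigned current = 0;
        unsigned last_reported = 0;
    };

    int main()
    {
        UnbufferedLog log;
        log << "preprocessing 1000 nodes ...";
        Percent progress(log, 1000);
        for (unsigned i = 0; i < 1000; ++i)
            progress.PrintIncrement(); // emits "." and "NN% " markers through the shared log
    }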


@@ -3,7 +3,7 @@
#include <boost/assert.hpp>
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include <cstddef>


@@ -1,55 +0,0 @@
#ifndef SIMPLE_LOGGER_HPP
#define SIMPLE_LOGGER_HPP
#include <atomic>
#include <mutex>
#include <sstream>
enum LogLevel
{
logINFO,
logWARNING,
logDEBUG
};
namespace osrm
{
namespace util
{
class LogPolicy
{
public:
void Unmute();
void Mute();
bool IsMute() const;
static LogPolicy &GetInstance();
LogPolicy(const LogPolicy &) = delete;
LogPolicy &operator=(const LogPolicy &) = delete;
private:
LogPolicy() : m_is_mute(true) {}
std::atomic<bool> m_is_mute;
};
class SimpleLogger
{
public:
SimpleLogger();
virtual ~SimpleLogger();
std::mutex &get_mutex();
std::ostringstream &Write(LogLevel l = logINFO) noexcept;
private:
std::ostringstream os;
LogLevel level;
};
}
}
#endif /* SIMPLE_LOGGER_HPP */


@@ -378,7 +378,8 @@ class StaticRTree
catch (const std::exception &exc)
{
throw exception(boost::str(boost::format("Leaf file %1% mapping failed: %2%") %
-    leaf_file % exc.what()));
+    leaf_file % exc.what()) +
+    SOURCE_REF);
}
}


@@ -7,12 +7,16 @@ set -o nounset
# Runs the Clang Tidy Tool in parallel on the code base.
# Requires a compilation database in the build directory.
+# This works on both OSX and Linux, it's a POSIX thingy
+NPROC=$(getconf _NPROCESSORS_ONLN)
-find src include unit_tests -type f -name '*.hpp' -o -name '*.cpp' \
+find src include unit_tests -type f -name '*.hpp' -o -name '*.cpp' -print0 \
| xargs \
+    -0 \
    -I{} \
-    -P $(nproc) \
+    -n 1 \
-    clang-tidy \
+    ./clang+llvm-3.9.0-x86_64-apple-darwin/bin/clang-tidy \
    -p build \
    -header-filter='.*' \
    {}


@@ -9,10 +9,11 @@
#include "storage/io.hpp"
#include "storage/io.hpp"
#include "util/exception.hpp"
+#include "util/exception_utils.hpp"
#include "util/graph_loader.hpp"
#include "util/integer_range.hpp"
#include "util/io.hpp"
-#include "util/simple_logger.hpp"
+#include "util/log.hpp"
#include "util/static_graph.hpp"
#include "util/static_rtree.hpp"
#include "util/string_util.hpp"
@@ -105,13 +106,13 @@ EdgeWeight getNewWeight(IterType speed_iter,
if (old_weight >= (new_segment_weight * log_edge_updates_factor))
{
auto speed_file = segment_speed_filenames.at(speed_iter->speed_source.source - 1);
-util::SimpleLogger().Write(logWARNING)
-    << "[weight updates] Edge weight update from " << old_secs << "s to " << new_secs
-    << "s New speed: " << speed_iter->speed_source.speed << " kph"
-    << ". Old speed: " << approx_original_speed << " kph"
-    << ". Segment length: " << segment_length << " m"
-    << ". Segment: " << speed_iter->segment.from << "," << speed_iter->segment.to
-    << " based on " << speed_file;
+util::Log(logWARNING) << "[weight updates] Edge weight update from " << old_secs
+    << "s to " << new_secs
+    << "s New speed: " << speed_iter->speed_source.speed << " kph"
+    << ". Old speed: " << approx_original_speed << " kph"
+    << ". Segment length: " << segment_length << " m"
+    << ". Segment: " << speed_iter->segment.from << ","
+    << speed_iter->segment.to << " based on " << speed_file;
}
}
@@ -131,12 +132,12 @@ int Contractor::Run()
if (config.core_factor > 1.0 || config.core_factor < 0)
{
-throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)");
+throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)" + SOURCE_REF);
}
TIMER_START(preparing);
-util::SimpleLogger().Write() << "Loading edge-expanded graph representation";
+util::Log() << "Loading edge-expanded graph representation";
util::DeallocatingVector<extractor::EdgeBasedEdge> edge_based_edge_list;
@@ -163,7 +164,7 @@ int Contractor::Run()
ReadNodeLevels(node_levels);
}
-util::SimpleLogger().Write() << "Reading node weights.";
+util::Log() << "Reading node weights.";
std::vector<EdgeWeight> node_weights;
std::string node_file_name = config.osrm_input_path.string() + ".enw";
@@ -172,7 +173,7 @@ int Contractor::Run()
storage::io::FileReader::VerifyFingerprint);
node_file.DeserializeVector(node_weights);
}
-util::SimpleLogger().Write() << "Done reading node weights.";
+util::Log() << "Done reading node weights.";
util::DeallocatingVector<QueryEdge> contracted_edge_list;
ContractGraph(max_edge_id,
@@ -183,7 +184,7 @@ int Contractor::Run()
node_levels);
TIMER_STOP(contraction);
-util::SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec";
+util::Log() << "Contraction took " << TIMER_SEC(contraction) << " sec";
std::size_t number_of_used_edges = WriteContractedGraph(max_edge_id, contracted_edge_list);
WriteCoreNodeMarker(std::move(is_core_node));
@@ -199,11 +200,11 @@ int Contractor::Run()
const auto edges_per_second =
static_cast<std::uint64_t>(number_of_used_edges / TIMER_SEC(contraction));
-util::SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
-util::SimpleLogger().Write() << "Contraction: " << nodes_per_second << " nodes/sec and "
-    << edges_per_second << " edges/sec";
-util::SimpleLogger().Write() << "finished preprocessing";
+util::Log() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
+util::Log() << "Contraction: " << nodes_per_second << " nodes/sec and " << edges_per_second
+    << " edges/sec";
+util::Log() << "finished preprocessing";
return 0;
}
@@ -309,10 +310,13 @@ parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_spee
std::uint64_t to_node_id{};
unsigned speed{};
+std::size_t line_number = 0;
std::for_each(
segment_speed_file_reader.GetLineIteratorBegin(),
segment_speed_file_reader.GetLineIteratorEnd(),
[&](const std::string &line) {
+    ++line_number;
using namespace boost::spirit::qi;
@@ -329,7 +333,11 @@ parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_spee
speed); //
if (!ok || it != last)
-throw util::exception{"Segment speed file " + filename + " malformed"};
+{
+    const std::string message{"Segment speed file " + filename +
+        " malformed on line " + std::to_string(line_number)};
+    throw util::exception(message + SOURCE_REF);
+}
SegmentSpeedSource val{{OSMNodeID{from_node_id}, OSMNodeID{to_node_id}},
{speed, static_cast<std::uint8_t>(file_id)}};
@@ -337,8 +345,7 @@ parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_spee
local.push_back(std::move(val));
});
-util::SimpleLogger().Write() << "Loaded speed file " << filename << " with " << local.size()
-    << " speeds";
+util::Log() << "Loaded speed file " << filename << " with " << local.size() << " speeds";
{
Mutex::scoped_lock _{flatten_mutex};
@@ -349,7 +356,14 @@ parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_spee
}
};
-tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file);
+try
+{
+    tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file);
+}
+catch (const tbb::captured_exception &e)
+{
+    throw util::exception(e.what() + SOURCE_REF);
+}
// With flattened map-ish view of all the files, sort and unique them on from,to,source
// The greater '>' is used here since we want to give files later on higher precedence
@@ -370,9 +384,8 @@ parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_spee
flatten.erase(it, end(flatten));
-util::SimpleLogger().Write() << "In total loaded " << segment_speed_filenames.size()
-    << " speed file(s) with a total of " << flatten.size()
-    << " unique values";
+util::Log() << "In total loaded " << segment_speed_filenames.size()
+    << " speed file(s) with a total of " << flatten.size() << " unique values";
return flatten;
}
@@ -399,10 +412,13 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_pe
std::uint64_t to_node_id{};
double penalty{};
+std::size_t line_number = 0;
std::for_each(
turn_penalty_file_reader.GetLineIteratorBegin(),
turn_penalty_file_reader.GetLineIteratorEnd(),
[&](const std::string &line) {
+    ++line_number;
using namespace boost::spirit::qi;
@@ -420,7 +436,11 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_pe
penalty); //
if (!ok || it != last)
-throw util::exception{"Turn penalty file " + filename + " malformed"};
+{
+    const std::string message{"Turn penalty file " + filename +
+        " malformed on line " + std::to_string(line_number)};
+    throw util::exception(message + SOURCE_REF);
+}
TurnPenaltySource val{
{OSMNodeID{from_node_id}, OSMNodeID{via_node_id}, OSMNodeID{to_node_id}},
@@ -428,8 +448,8 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_pe
local.push_back(std::move(val));
});
-util::SimpleLogger().Write() << "Loaded penalty file " << filename << " with "
-    << local.size() << " turn penalties";
+util::Log() << "Loaded penalty file " << filename << " with " << local.size()
+    << " turn penalties";
{
Mutex::scoped_lock _{flatten_mutex};
@@ -440,7 +460,14 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_pe
}
};
-tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file);
+try
+{
+    tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file);
+}
+catch (const tbb::captured_exception &e)
+{
+    throw util::exception(e.what() + SOURCE_REF);
+}
// With flattened map-ish view of all the files, sort and unique them on from,to,source
// The greater '>' is used here since we want to give files later on higher precedence
@@ -463,9 +490,8 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_pe
map.erase(it, end(map));
-util::SimpleLogger().Write() << "In total loaded " << turn_penalty_filenames.size()
-    << " turn penalty file(s) with a total of " << map.size()
-    << " unique values";
+util::Log() << "In total loaded " << turn_penalty_filenames.size()
+    << " turn penalty file(s) with a total of " << map.size() << " unique values";
return map;
}
@ -486,9 +512,10 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
const double log_edge_updates_factor) const double log_edge_updates_factor)
{ {
if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255) if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255)
throw util::exception("Limit of 255 segment speed and turn penalty files each reached"); throw util::exception("Limit of 255 segment speed and turn penalty files each reached" +
SOURCE_REF);
util::SimpleLogger().Write() << "Opening " << edge_based_graph_filename; util::Log() << "Opening " << edge_based_graph_filename;
auto mmap_file = [](const std::string &filename) { auto mmap_file = [](const std::string &filename) {
using boost::interprocess::file_mapping; using boost::interprocess::file_mapping;
@ -542,8 +569,7 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
graph_header.fingerprint.TestContractor(fingerprint_valid); graph_header.fingerprint.TestContractor(fingerprint_valid);
edge_based_edge_list.resize(graph_header.number_of_edges); edge_based_edge_list.resize(graph_header.number_of_edges);
util::SimpleLogger().Write() << "Reading " << graph_header.number_of_edges util::Log() << "Reading " << graph_header.number_of_edges << " edges from the edge based graph";
<< " edges from the edge based graph";
SegmentSpeedSourceFlatMap segment_speed_lookup; SegmentSpeedSourceFlatMap segment_speed_lookup;
TurnPenaltySourceFlatMap turn_penalty_lookup; TurnPenaltySourceFlatMap turn_penalty_lookup;
@ -735,15 +761,15 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
{ {
if (i == LUA_SOURCE) if (i == LUA_SOURCE)
{ {
util::SimpleLogger().Write() << "Used " << merged_counters[LUA_SOURCE] util::Log() << "Used " << merged_counters[LUA_SOURCE]
<< " speeds from LUA profile or input map"; << " speeds from LUA profile or input map";
} }
else else
{ {
// segments_speeds_counters has 0 as LUA, segment_speed_filenames not, thus we need // segments_speeds_counters has 0 as LUA, segment_speed_filenames not, thus we need
// to susbstract 1 to avoid off-by-one error // to susbstract 1 to avoid off-by-one error
util::SimpleLogger().Write() << "Used " << merged_counters[i] << " speeds from " util::Log() << "Used " << merged_counters[i] << " speeds from "
<< segment_speed_filenames[i - 1]; << segment_speed_filenames[i - 1];
} }
} }
} }
@ -756,7 +782,8 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
std::ofstream geometry_stream(geometry_filename, std::ios::binary); std::ofstream geometry_stream(geometry_filename, std::ios::binary);
if (!geometry_stream) if (!geometry_stream)
{ {
throw util::exception("Failed to open " + geometry_filename + " for writing"); const std::string message{"Failed to open " + geometry_filename + " for writing"};
throw util::exception(message + SOURCE_REF);
} }
const unsigned number_of_indices = m_geometry_indices.size(); const unsigned number_of_indices = m_geometry_indices.size();
const unsigned number_of_compressed_geometries = m_geometry_node_list.size(); const unsigned number_of_compressed_geometries = m_geometry_node_list.size();
@ -777,7 +804,9 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
std::ofstream datasource_stream(datasource_indexes_filename, std::ios::binary); std::ofstream datasource_stream(datasource_indexes_filename, std::ios::binary);
if (!datasource_stream) if (!datasource_stream)
{ {
throw util::exception("Failed to open " + datasource_indexes_filename + " for writing"); const std::string message{"Failed to open " + datasource_indexes_filename +
" for writing"};
throw util::exception(message + SOURCE_REF);
} }
std::uint64_t number_of_datasource_entries = m_geometry_datasource.size(); std::uint64_t number_of_datasource_entries = m_geometry_datasource.size();
datasource_stream.write(reinterpret_cast<const char *>(&number_of_datasource_entries), datasource_stream.write(reinterpret_cast<const char *>(&number_of_datasource_entries),
@ -793,7 +822,9 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
std::ofstream datasource_stream(datasource_names_filename, std::ios::binary); std::ofstream datasource_stream(datasource_names_filename, std::ios::binary);
if (!datasource_stream) if (!datasource_stream)
{ {
throw util::exception("Failed to open " + datasource_names_filename + " for writing"); const std::string message{"Failed to open " + datasource_names_filename +
" for writing"};
throw util::exception(message + SOURCE_REF);
} }
datasource_stream << "lua profile" << std::endl; datasource_stream << "lua profile" << std::endl;
for (auto const &name : segment_speed_filenames) for (auto const &name : segment_speed_filenames)
@ -894,11 +925,11 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
if (new_turn_weight + new_weight < compressed_edge_nodes) if (new_turn_weight + new_weight < compressed_edge_nodes)
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << "turn penalty " << turn_iter->penalty_source.penalty
<< "turn penalty " << turn_iter->penalty_source.penalty << " for turn " << " for turn " << penaltyblock->from_id << ", "
<< penaltyblock->from_id << ", " << penaltyblock->via_id << ", " << penaltyblock->via_id << ", " << penaltyblock->to_id
<< penaltyblock->to_id << " is too negative: clamping turn weight to " << " is too negative: clamping turn weight to "
<< compressed_edge_nodes; << compressed_edge_nodes;
} }
inbuffer.weight = std::max(new_turn_weight + new_weight, compressed_edge_nodes); inbuffer.weight = std::max(new_turn_weight + new_weight, compressed_edge_nodes);
@ -915,7 +946,7 @@ EdgeID Contractor::LoadEdgeExpandedGraph(
edge_based_edge_list.emplace_back(std::move(inbuffer)); edge_based_edge_list.emplace_back(std::move(inbuffer));
} }
util::SimpleLogger().Write() << "Done reading edges"; util::Log() << "Done reading edges";
return graph_header.max_edge_id; return graph_header.max_edge_id;
} }
@ -964,8 +995,7 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
// Sorting contracted edges in a way that the static query graph can read some in in-place. // Sorting contracted edges in a way that the static query graph can read some in in-place.
tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end()); tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end());
const std::uint64_t contracted_edge_count = contracted_edge_list.size(); const std::uint64_t contracted_edge_count = contracted_edge_list.size();
util::SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count util::Log() << "Serializing compacted graph of " << contracted_edge_count << " edges";
<< " edges";
const util::FingerPrint fingerprint = util::FingerPrint::GetValid(); const util::FingerPrint fingerprint = util::FingerPrint::GetValid();
boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary); boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary);
@ -982,15 +1012,14 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
return tmp_max; return tmp_max;
}(); }();
util::SimpleLogger().Write(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes"; util::Log(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes";
util::SimpleLogger().Write(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) util::Log(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) << " nodes";
<< " nodes";
std::vector<util::StaticGraph<EdgeData>::NodeArrayEntry> node_array; std::vector<util::StaticGraph<EdgeData>::NodeArrayEntry> node_array;
// make sure we have at least one sentinel // make sure we have at least one sentinel
node_array.resize(max_node_id + 2); node_array.resize(max_node_id + 2);
util::SimpleLogger().Write() << "Building node array"; util::Log() << "Building node array";
util::StaticGraph<EdgeData>::EdgeIterator edge = 0; util::StaticGraph<EdgeData>::EdgeIterator edge = 0;
util::StaticGraph<EdgeData>::EdgeIterator position = 0; util::StaticGraph<EdgeData>::EdgeIterator position = 0;
util::StaticGraph<EdgeData>::EdgeIterator last_edge; util::StaticGraph<EdgeData>::EdgeIterator last_edge;
@ -1014,11 +1043,11 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
node_array[sentinel_counter].first_edge = contracted_edge_count; node_array[sentinel_counter].first_edge = contracted_edge_count;
} }
util::SimpleLogger().Write() << "Serializing node array"; util::Log() << "Serializing node array";
RangebasedCRC32 crc32_calculator; RangebasedCRC32 crc32_calculator;
const unsigned edges_crc32 = crc32_calculator(contracted_edge_list); const unsigned edges_crc32 = crc32_calculator(contracted_edge_list);
util::SimpleLogger().Write() << "Writing CRC32: " << edges_crc32; util::Log() << "Writing CRC32: " << edges_crc32;
const std::uint64_t node_array_size = node_array.size(); const std::uint64_t node_array_size = node_array.size();
// serialize crc32, aka checksum // serialize crc32, aka checksum
@ -1036,7 +1065,7 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
} }
// serialize all edges // serialize all edges
util::SimpleLogger().Write() << "Building edge array"; util::Log() << "Building edge array";
std::size_t number_of_used_edges = 0; std::size_t number_of_used_edges = 0;
util::StaticGraph<EdgeData>::EdgeArrayEntry current_edge; util::StaticGraph<EdgeData>::EdgeArrayEntry current_edge;
@ -1055,15 +1084,15 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
#ifndef NDEBUG #ifndef NDEBUG
if (current_edge.data.weight <= 0) if (current_edge.data.weight <= 0)
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << "Edge: " << edge
<< "Edge: " << edge << ",source: " << contracted_edge_list[edge].source << ",source: " << contracted_edge_list[edge].source
<< ", target: " << contracted_edge_list[edge].target << ", target: " << contracted_edge_list[edge].target
<< ", weight: " << current_edge.data.weight; << ", weight: " << current_edge.data.weight;
util::SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node " util::Log(logWARNING) << "Failed at adjacency list of node "
<< contracted_edge_list[edge].source << "/" << contracted_edge_list[edge].source << "/"
<< node_array.size() - 1; << node_array.size() - 1;
return 1; throw util::exception("Edge weight is <= 0" + SOURCE_REF);
} }
#endif #endif
hsgr_output_stream.write((char *)&current_edge, hsgr_output_stream.write((char *)&current_edge,
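A minimal sketch (not part of this commit) of the logging pattern applied throughout the diff, assuming util::Log from util/log.hpp is an RAII stream that emits its message when the temporary is destroyed; the function and its arguments are illustrative only:

#include "util/log.hpp"

namespace osrm
{
void reportSuspiciousWeight(unsigned edge_id, int weight)
{
    // before: util::SimpleLogger().Write(logWARNING) << ...;  after: one Log temporary per message
    util::Log(logWARNING) << "Edge " << edge_id << " has suspicious weight " << weight;
}
} // namespace osrm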

View File

@ -7,7 +7,7 @@
#include "engine/datafacade/shared_memory_datafacade.hpp" #include "engine/datafacade/shared_memory_datafacade.hpp"
#include "storage/shared_barriers.hpp" #include "storage/shared_barriers.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/interprocess/sync/named_condition.hpp> #include <boost/interprocess/sync/named_condition.hpp>
@ -68,7 +68,9 @@ Engine::Engine(const EngineConfig &config)
if (!DataWatchdog::TryConnect()) if (!DataWatchdog::TryConnect())
{ {
throw util::exception( throw util::exception(
"No shared memory blocks found, have you forgotten to run osrm-datastore?"); std::string(
"No shared memory blocks found, have you forgotten to run osrm-datastore?") +
SOURCE_REF);
} }
watchdog = std::make_unique<DataWatchdog>(); watchdog = std::make_unique<DataWatchdog>();
@ -78,7 +80,7 @@ Engine::Engine(const EngineConfig &config)
{ {
if (!config.storage_config.IsValid()) if (!config.storage_config.IsValid())
{ {
throw util::exception("Invalid file paths given!"); throw util::exception("Invalid file paths given!" + SOURCE_REF);
} }
immutable_data_facade = immutable_data_facade =
std::make_shared<datafacade::ProcessMemoryDataFacade>(config.storage_config); std::make_shared<datafacade::ProcessMemoryDataFacade>(config.storage_config);
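A minimal sketch (not part of this commit) of the new error-handling convention shown above, assuming SOURCE_REF is a macro (from util/exception_utils.hpp, included later in this diff) that appends the throw site to the message; the wrapper function is illustrative:

#include "util/exception.hpp"
#include "util/exception_utils.hpp"

#include <string>

namespace osrm
{
void requireSharedMemory(bool blocks_found)
{
    if (!blocks_found)
    {
        // the exception text carries both the human-readable hint and the source reference
        throw util::exception(
            std::string("No shared memory blocks found, have you forgotten to run osrm-datastore?") +
            SOURCE_REF);
    }
}
} // namespace osrm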

View File

@ -1,5 +1,5 @@
#include "extractor/compressed_edge_container.hpp" #include "extractor/compressed_edge_container.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
@ -300,14 +300,13 @@ void CompressedEdgeContainer::PrintStatistics() const
longest_chain_length = std::max(longest_chain_length, (uint64_t)current_vector.size()); longest_chain_length = std::max(longest_chain_length, (uint64_t)current_vector.size());
} }
util::SimpleLogger().Write() util::Log() << "Geometry successfully removed:"
<< "Geometry successfully removed:" "\n compressed edges: "
"\n compressed edges: " << compressed_edges << "\n compressed geometries: " << compressed_geometries
<< compressed_edges << "\n compressed geometries: " << compressed_geometries << "\n longest chain length: " << longest_chain_length << "\n cmpr ratio: "
<< "\n longest chain length: " << longest_chain_length << "\n cmpr ratio: " << ((float)compressed_edges / std::max(compressed_geometries, (uint64_t)1))
<< ((float)compressed_edges / std::max(compressed_geometries, (uint64_t)1)) << "\n avg chain length: "
<< "\n avg chain length: " << (float)compressed_geometries / std::max((uint64_t)1, compressed_edges);
<< (float)compressed_geometries / std::max((uint64_t)1, compressed_edges);
} }
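A minimal sketch (not part of this commit) of how the statistics above are now emitted: the whole block is streamed into a single util::Log() temporary and flushed as one message, with embedded newlines instead of one Write() call per line. Numbers and the helper signature are illustrative:

#include "util/log.hpp"

#include <algorithm>
#include <cstdint>

namespace osrm
{
void printCompressionStatistics(std::uint64_t compressed_edges, std::uint64_t compressed_geometries)
{
    // one Log temporary, several output lines held together by "\n"
    util::Log() << "Geometry successfully removed:"
                   "\n  compressed edges: "
                << compressed_edges << "\n  compressed geometries: " << compressed_geometries
                << "\n  cmpr ratio: "
                << (static_cast<float>(compressed_edges) /
                    std::max<std::uint64_t>(compressed_geometries, 1));
}
} // namespace osrm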
const CompressedEdgeContainer::OnewayEdgeBucket & const CompressedEdgeContainer::OnewayEdgeBucket &

View File

@ -6,8 +6,8 @@
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/guidance/turn_bearing.hpp" #include "util/guidance/turn_bearing.hpp"
#include "util/integer_range.hpp" #include "util/integer_range.hpp"
#include "util/log.hpp"
#include "util/percent.hpp" #include "util/percent.hpp"
#include "util/simple_logger.hpp"
#include "util/timing_util.hpp" #include "util/timing_util.hpp"
#include "extractor/guidance/toolkit.hpp" #include "extractor/guidance/toolkit.hpp"
@ -213,10 +213,10 @@ void EdgeBasedGraphFactory::Run(ScriptingEnvironment &scripting_environment,
TIMER_STOP(generate_edges); TIMER_STOP(generate_edges);
util::SimpleLogger().Write() << "Timing statistics for edge-expanded graph:"; util::Log() << "Timing statistics for edge-expanded graph:";
util::SimpleLogger().Write() << "Renumbering edges: " << TIMER_SEC(renumber) << "s"; util::Log() << "Renumbering edges: " << TIMER_SEC(renumber) << "s";
util::SimpleLogger().Write() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s"; util::Log() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s";
util::SimpleLogger().Write() << "Generating edges: " << TIMER_SEC(generate_edges) << "s"; util::Log() << "Generating edges: " << TIMER_SEC(generate_edges) << "s";
} }
/// Renumbers all _forward_ edges and sets the edge_id. /// Renumbers all _forward_ edges and sets the edge_id.
@ -258,40 +258,44 @@ unsigned EdgeBasedGraphFactory::RenumberEdges()
/// Creates the nodes in the edge expanded graph from edges in the node-based graph. /// Creates the nodes in the edge expanded graph from edges in the node-based graph.
void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes() void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
{ {
util::Percent progress(m_node_based_graph->GetNumberOfNodes()); util::Log() << "Generating edge expanded nodes ... ";
m_compressed_edge_container.InitializeBothwayVector();
// loop over all edges and generate new set of nodes
for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{ {
BOOST_ASSERT(node_u != SPECIAL_NODEID); util::UnbufferedLog log;
BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes()); util::Percent progress(log, m_node_based_graph->GetNumberOfNodes());
progress.PrintStatus(node_u);
for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u)) m_compressed_edge_container.InitializeBothwayVector();
// loop over all edges and generate new set of nodes
for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{ {
const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1); BOOST_ASSERT(node_u != SPECIAL_NODEID);
BOOST_ASSERT(e1 != SPECIAL_EDGEID); BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes());
const NodeID node_v = m_node_based_graph->GetTarget(e1); progress.PrintStatus(node_u);
for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
BOOST_ASSERT(SPECIAL_NODEID != node_v);
// pick only every other edge, since we have every edge as an outgoing
// and incoming egde
if (node_u > node_v)
{ {
continue; const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1);
} BOOST_ASSERT(e1 != SPECIAL_EDGEID);
const NodeID node_v = m_node_based_graph->GetTarget(e1);
BOOST_ASSERT(node_u < node_v); BOOST_ASSERT(SPECIAL_NODEID != node_v);
// pick only every other edge, since we have every edge as an outgoing
// and incoming edge
if (node_u > node_v)
{
continue;
}
// if we found a non-forward edge reverse and try again BOOST_ASSERT(node_u < node_v);
if (edge_data.edge_id == SPECIAL_NODEID)
{ // if we found a non-forward edge reverse and try again
InsertEdgeBasedNode(node_v, node_u); if (edge_data.edge_id == SPECIAL_NODEID)
} {
else InsertEdgeBasedNode(node_v, node_u);
{ }
InsertEdgeBasedNode(node_u, node_v); else
{
InsertEdgeBasedNode(node_u, node_v);
}
} }
} }
} }
@ -299,8 +303,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
BOOST_ASSERT(m_edge_based_node_list.size() == m_edge_based_node_is_startpoint.size()); BOOST_ASSERT(m_edge_based_node_list.size() == m_edge_based_node_is_startpoint.size());
BOOST_ASSERT(m_max_edge_id + 1 == m_edge_based_node_weights.size()); BOOST_ASSERT(m_max_edge_id + 1 == m_edge_based_node_weights.size());
util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() util::Log() << "Generated " << m_edge_based_node_list.size() << " nodes in edge-expanded graph";
<< " nodes in edge-expanded graph";
} }
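A minimal sketch (not part of this commit) of the scoped progress pattern introduced above, assuming util::Percent prints into the util::UnbufferedLog it is handed on construction, as the diff suggests; the loop body is a placeholder:

#include "util/log.hpp"
#include "util/percent.hpp"

#include <cstddef>

namespace osrm
{
void processWithProgress(std::size_t number_of_items)
{
    {
        util::UnbufferedLog log;                       // unbuffered, so progress shows up immediately
        util::Percent progress(log, number_of_items);
        for (std::size_t i = 0; i < number_of_items; ++i)
        {
            progress.PrintStatus(i);                   // prints the running percentage into the log
            // ... per-item work ...
        }
    }                                                  // scope ends, the progress line is completed
}
} // namespace osrm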
/// Actually it also generates OriginalEdgeData and serializes them... /// Actually it also generates OriginalEdgeData and serializes them...
@ -312,7 +315,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const std::string &edge_fixed_penalties_filename, const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup) const bool generate_edge_lookup)
{ {
util::SimpleLogger().Write() << "generating edge-expanded edges"; util::Log() << "Generating edge-expanded edges ";
std::size_t node_based_edge_counter = 0; std::size_t node_based_edge_counter = 0;
std::size_t original_edges_counter = 0; std::size_t original_edges_counter = 0;
@ -341,7 +344,6 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
// Loop over all turns and generate new set of edges. // Loop over all turns and generate new set of edges.
// Three nested loops look super-linear, but we are dealing with a (kind of) // Three nested loops look super-linear, but we are dealing with a (kind of)
// linear number of turns only. // linear number of turns only.
util::Percent progress(m_node_based_graph->GetNumberOfNodes());
SuffixTable street_name_suffix_table(scripting_environment); SuffixTable street_name_suffix_table(scripting_environment);
guidance::TurnAnalysis turn_analysis(*m_node_based_graph, guidance::TurnAnalysis turn_analysis(*m_node_based_graph,
m_node_info_list, m_node_info_list,
@ -363,260 +365,275 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
bearing_class_by_node_based_node.resize(m_node_based_graph->GetNumberOfNodes(), bearing_class_by_node_based_node.resize(m_node_based_graph->GetNumberOfNodes(),
std::numeric_limits<std::uint32_t>::max()); std::numeric_limits<std::uint32_t>::max());
// going over all nodes (which form the center of an intersection), we compute all
// possible turns along these intersections.
for (const auto node_at_center_of_intersection :
util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{ {
progress.PrintStatus(node_at_center_of_intersection); util::UnbufferedLog log;
const auto shape_result = util::Percent progress(log, m_node_based_graph->GetNumberOfNodes());
turn_analysis.ComputeIntersectionShapes(node_at_center_of_intersection); // going over all nodes (which form the center of an intersection), we compute all
// possible turns along these intersections.
// all nodes in the graph are connected in both directions. We check all outgoing nodes to for (const auto node_at_center_of_intersection :
// find the incoming edge. This is a larger search overhead, but the cost we need to pay to util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
// generate edges here is worth the additional search overhead.
//
// a -> b <-> c
// |
// v
// d
//
// will have:
// a: b,rev=0
// b: a,rev=1 c,rev=0 d,rev=0
// c: b,rev=0
//
// From the flags alone, we cannot determine which nodes are connected to `b` by an outgoing
// edge. Therefore, we have to search all connected edges for edges entering `b`
for (const EdgeID outgoing_edge :
m_node_based_graph->GetAdjacentEdgeRange(node_at_center_of_intersection))
{ {
const NodeID node_along_road_entering = m_node_based_graph->GetTarget(outgoing_edge); progress.PrintStatus(node_at_center_of_intersection);
const auto incoming_edge = m_node_based_graph->FindEdge(node_along_road_entering, const auto shape_result =
node_at_center_of_intersection); turn_analysis.ComputeIntersectionShapes(node_at_center_of_intersection);
if (m_node_based_graph->GetEdgeData(incoming_edge).reversed) // all nodes in the graph are connected in both directions. We check all outgoing nodes
continue; // to
// find the incoming edge. This is a larger search overhead, but the cost we need to pay
++node_based_edge_counter; // to
// generate edges here is worth the additional search overhead.
auto intersection_with_flags_and_angles = //
turn_analysis.GetIntersectionGenerator().TransformIntersectionShapeIntoView( // a -> b <-> c
node_along_road_entering, // |
incoming_edge, // v
shape_result.normalised_intersection_shape, // d
shape_result.intersection_shape, //
shape_result.merging_map); // will have:
// a: b,rev=0
auto intersection = turn_analysis.AssignTurnTypes( // b: a,rev=1 c,rev=0 d,rev=0
node_along_road_entering, incoming_edge, intersection_with_flags_and_angles); // c: b,rev=0
//
BOOST_ASSERT(intersection.valid()); // From the flags alone, we cannot determine which nodes are connected to `b` by an
// outgoing
intersection = turn_lane_handler.assignTurnLanes( // edge. Therefore, we have to search all connected edges for edges entering `b`
node_along_road_entering, incoming_edge, std::move(intersection)); for (const EdgeID outgoing_edge :
m_node_based_graph->GetAdjacentEdgeRange(node_at_center_of_intersection))
// the entry class depends on the turn, so we have to classify the intersection for
// every edge
const auto turn_classification = classifyIntersection(intersection);
const auto entry_class_id = [&](const util::guidance::EntryClass entry_class) {
if (0 == entry_class_hash.count(entry_class))
{
const auto id = static_cast<std::uint16_t>(entry_class_hash.size());
entry_class_hash[entry_class] = id;
return id;
}
else
{
return entry_class_hash.find(entry_class)->second;
}
}(turn_classification.first);
const auto bearing_class_id = [&](const util::guidance::BearingClass bearing_class) {
if (0 == bearing_class_hash.count(bearing_class))
{
const auto id = static_cast<std::uint32_t>(bearing_class_hash.size());
bearing_class_hash[bearing_class] = id;
return id;
}
else
{
return bearing_class_hash.find(bearing_class)->second;
}
}(turn_classification.second);
bearing_class_by_node_based_node[node_at_center_of_intersection] = bearing_class_id;
for (const auto &turn : intersection)
{ {
// only keep valid turns const NodeID node_along_road_entering =
if (!turn.entry_allowed) m_node_based_graph->GetTarget(outgoing_edge);
const auto incoming_edge = m_node_based_graph->FindEdge(
node_along_road_entering, node_at_center_of_intersection);
if (m_node_based_graph->GetEdgeData(incoming_edge).reversed)
continue; continue;
// only add an edge if turn is not prohibited ++node_based_edge_counter;
const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(incoming_edge);
const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(turn.eid);
BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id); auto intersection_with_flags_and_angles =
BOOST_ASSERT(!edge_data1.reversed); turn_analysis.GetIntersectionGenerator().TransformIntersectionShapeIntoView(
BOOST_ASSERT(!edge_data2.reversed); node_along_road_entering,
incoming_edge,
shape_result.normalised_intersection_shape,
shape_result.intersection_shape,
shape_result.merging_map);
// the following is the core of the loop. auto intersection = turn_analysis.AssignTurnTypes(
unsigned distance = edge_data1.distance; node_along_road_entering, incoming_edge, intersection_with_flags_and_angles);
if (m_traffic_lights.find(node_at_center_of_intersection) != m_traffic_lights.end())
{
distance += profile_properties.traffic_signal_penalty;
}
const int32_t turn_penalty = BOOST_ASSERT(intersection.valid());
scripting_environment.GetTurnPenalty(180. - turn.angle);
const auto turn_instruction = turn.instruction; intersection = turn_lane_handler.assignTurnLanes(
if (turn_instruction.direction_modifier == guidance::DirectionModifier::UTurn) node_along_road_entering, incoming_edge, std::move(intersection));
{
distance += profile_properties.u_turn_penalty;
}
// don't add turn penalty if it is not an actual turn. This heuristic is necessary // the entry class depends on the turn, so we have to classify the intersection for
// since OSRM cannot handle looping roads/parallel roads // every edge
if (turn_instruction.type != guidance::TurnType::NoTurn) const auto turn_classification = classifyIntersection(intersection);
distance += turn_penalty;
const bool is_encoded_forwards = const auto entry_class_id = [&](const util::guidance::EntryClass entry_class) {
m_compressed_edge_container.HasZippedEntryForForwardID(incoming_edge); if (0 == entry_class_hash.count(entry_class))
const bool is_encoded_backwards =
m_compressed_edge_container.HasZippedEntryForReverseID(incoming_edge);
BOOST_ASSERT(is_encoded_forwards || is_encoded_backwards);
if (is_encoded_forwards)
{
original_edge_data_vector.emplace_back(
GeometryID{m_compressed_edge_container.GetZippedPositionForForwardID(
incoming_edge),
true},
edge_data1.name_id,
turn.lane_data_id,
turn_instruction,
entry_class_id,
edge_data1.travel_mode,
util::guidance::TurnBearing(intersection[0].bearing),
util::guidance::TurnBearing(turn.bearing));
}
else if (is_encoded_backwards)
{
original_edge_data_vector.emplace_back(
GeometryID{m_compressed_edge_container.GetZippedPositionForReverseID(
incoming_edge),
false},
edge_data1.name_id,
turn.lane_data_id,
turn_instruction,
entry_class_id,
edge_data1.travel_mode,
util::guidance::TurnBearing(intersection[0].bearing),
util::guidance::TurnBearing(turn.bearing));
}
++original_edges_counter;
if (original_edge_data_vector.size() > 1024 * 1024 * 10)
{
FlushVectorToStream(edge_data_file, original_edge_data_vector);
}
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id);
// NOTE: potential overflow here if we hit 2^32 routable edges
BOOST_ASSERT(m_edge_based_edge_list.size() <= std::numeric_limits<NodeID>::max());
m_edge_based_edge_list.emplace_back(edge_data1.edge_id,
edge_data2.edge_id,
m_edge_based_edge_list.size(),
distance,
true,
false);
BOOST_ASSERT(original_edges_counter == m_edge_based_edge_list.size());
// Here is where we write out the mapping between the edge-expanded edges, and
// the node-based edges that are originally used to calculate the `distance`
// for the edge-expanded edges. About 40 lines back, there is:
//
// unsigned distance = edge_data1.distance;
//
// This tells us that the weight for an edge-expanded-edge is based on the weight
// of the *source* node-based edge. Therefore, we will look up the individual
// segments of the source node-based edge, and write out a mapping between
// those and the edge-based-edge ID.
// External programs can then use this mapping to quickly perform
// updates to the edge-expanded-edge based directly on its ID.
if (generate_edge_lookup)
{
const auto node_based_edges =
m_compressed_edge_container.GetBucketReference(incoming_edge);
NodeID previous = node_along_road_entering;
const unsigned node_count = node_based_edges.size() + 1;
const QueryNode &first_node = m_node_info_list[previous];
lookup::SegmentHeaderBlock header = {node_count, first_node.node_id};
edge_segment_file.write(reinterpret_cast<const char *>(&header),
sizeof(header));
for (auto target_node : node_based_edges)
{ {
const QueryNode &from = m_node_info_list[previous]; const auto id = static_cast<std::uint16_t>(entry_class_hash.size());
const QueryNode &to = m_node_info_list[target_node.node_id]; entry_class_hash[entry_class] = id;
const double segment_length = return id;
util::coordinate_calculation::greatCircleDistance(from, to); }
else
{
return entry_class_hash.find(entry_class)->second;
}
}(turn_classification.first);
lookup::SegmentBlock nodeblock = { const auto bearing_class_id =
to.node_id, segment_length, target_node.weight}; [&](const util::guidance::BearingClass bearing_class) {
if (0 == bearing_class_hash.count(bearing_class))
{
const auto id = static_cast<std::uint32_t>(bearing_class_hash.size());
bearing_class_hash[bearing_class] = id;
return id;
}
else
{
return bearing_class_hash.find(bearing_class)->second;
}
}(turn_classification.second);
bearing_class_by_node_based_node[node_at_center_of_intersection] = bearing_class_id;
edge_segment_file.write(reinterpret_cast<const char *>(&nodeblock), for (const auto &turn : intersection)
sizeof(nodeblock)); {
previous = target_node.node_id; // only keep valid turns
if (!turn.entry_allowed)
continue;
// only add an edge if turn is not prohibited
const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(incoming_edge);
const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(turn.eid);
BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id);
BOOST_ASSERT(!edge_data1.reversed);
BOOST_ASSERT(!edge_data2.reversed);
// the following is the core of the loop.
unsigned distance = edge_data1.distance;
if (m_traffic_lights.find(node_at_center_of_intersection) !=
m_traffic_lights.end())
{
distance += profile_properties.traffic_signal_penalty;
} }
// We also now write out the mapping between the edge-expanded edges and the const int32_t turn_penalty =
// original nodes. Since each edge represents a possible maneuver, external scripting_environment.GetTurnPenalty(180. - turn.angle);
// programs can use this to quickly perform updates to edge weights in order
// to penalize certain turns.
// If this edge is 'trivial' -- where the compressed edge corresponds const auto turn_instruction = turn.instruction;
// exactly to an original OSM segment -- we can pull the turn's preceding if (turn_instruction.direction_modifier == guidance::DirectionModifier::UTurn)
// node ID directly with `node_along_road_entering`; otherwise, we need to look {
// up the node distance += profile_properties.u_turn_penalty;
// immediately preceding the turn from the compressed edge container. }
const bool isTrivial = m_compressed_edge_container.IsTrivial(incoming_edge);
const auto &from_node = // don't add turn penalty if it is not an actual turn. This heuristic is
isTrivial // necessary
? m_node_info_list[node_along_road_entering] // since OSRM cannot handle looping roads/parallel roads
: m_node_info_list[m_compressed_edge_container.GetLastEdgeSourceID( if (turn_instruction.type != guidance::TurnType::NoTurn)
incoming_edge)]; distance += turn_penalty;
const auto &via_node =
m_node_info_list[m_compressed_edge_container.GetLastEdgeTargetID(
incoming_edge)];
const auto &to_node =
m_node_info_list[m_compressed_edge_container.GetFirstEdgeTargetID(
turn.eid)];
const unsigned fixed_penalty = distance - edge_data1.distance; const bool is_encoded_forwards =
lookup::PenaltyBlock penaltyblock = { m_compressed_edge_container.HasZippedEntryForForwardID(incoming_edge);
fixed_penalty, from_node.node_id, via_node.node_id, to_node.node_id}; const bool is_encoded_backwards =
edge_penalty_file.write(reinterpret_cast<const char *>(&penaltyblock), m_compressed_edge_container.HasZippedEntryForReverseID(incoming_edge);
sizeof(penaltyblock)); BOOST_ASSERT(is_encoded_forwards || is_encoded_backwards);
if (is_encoded_forwards)
{
original_edge_data_vector.emplace_back(
GeometryID{m_compressed_edge_container.GetZippedPositionForForwardID(
incoming_edge),
true},
edge_data1.name_id,
turn.lane_data_id,
turn_instruction,
entry_class_id,
edge_data1.travel_mode,
util::guidance::TurnBearing(intersection[0].bearing),
util::guidance::TurnBearing(turn.bearing));
}
else if (is_encoded_backwards)
{
original_edge_data_vector.emplace_back(
GeometryID{m_compressed_edge_container.GetZippedPositionForReverseID(
incoming_edge),
false},
edge_data1.name_id,
turn.lane_data_id,
turn_instruction,
entry_class_id,
edge_data1.travel_mode,
util::guidance::TurnBearing(intersection[0].bearing),
util::guidance::TurnBearing(turn.bearing));
}
++original_edges_counter;
if (original_edge_data_vector.size() > 1024 * 1024 * 10)
{
FlushVectorToStream(edge_data_file, original_edge_data_vector);
}
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id);
// NOTE: potential overflow here if we hit 2^32 routable edges
BOOST_ASSERT(m_edge_based_edge_list.size() <=
std::numeric_limits<NodeID>::max());
m_edge_based_edge_list.emplace_back(edge_data1.edge_id,
edge_data2.edge_id,
m_edge_based_edge_list.size(),
distance,
true,
false);
BOOST_ASSERT(original_edges_counter == m_edge_based_edge_list.size());
// Here is where we write out the mapping between the edge-expanded edges, and
// the node-based edges that are originally used to calculate the `distance`
// for the edge-expanded edges. About 40 lines back, there is:
//
// unsigned distance = edge_data1.distance;
//
// This tells us that the weight for an edge-expanded-edge is based on the
// weight
// of the *source* node-based edge. Therefore, we will look up the individual
// segments of the source node-based edge, and write out a mapping between
// those and the edge-based-edge ID.
// External programs can then use this mapping to quickly perform
// updates to the edge-expanded-edge based directly on its ID.
if (generate_edge_lookup)
{
const auto node_based_edges =
m_compressed_edge_container.GetBucketReference(incoming_edge);
NodeID previous = node_along_road_entering;
const unsigned node_count = node_based_edges.size() + 1;
const QueryNode &first_node = m_node_info_list[previous];
lookup::SegmentHeaderBlock header = {node_count, first_node.node_id};
edge_segment_file.write(reinterpret_cast<const char *>(&header),
sizeof(header));
for (auto target_node : node_based_edges)
{
const QueryNode &from = m_node_info_list[previous];
const QueryNode &to = m_node_info_list[target_node.node_id];
const double segment_length =
util::coordinate_calculation::greatCircleDistance(from, to);
lookup::SegmentBlock nodeblock = {
to.node_id, segment_length, target_node.weight};
edge_segment_file.write(reinterpret_cast<const char *>(&nodeblock),
sizeof(nodeblock));
previous = target_node.node_id;
}
// We also now write out the mapping between the edge-expanded edges and the
// original nodes. Since each edge represents a possible maneuver, external
// programs can use this to quickly perform updates to edge weights in order
// to penalize certain turns.
// If this edge is 'trivial' -- where the compressed edge corresponds
// exactly to an original OSM segment -- we can pull the turn's preceding
// node ID directly with `node_along_road_entering`; otherwise, we need to
// look
// up the node
// immediately preceding the turn from the compressed edge container.
const bool isTrivial = m_compressed_edge_container.IsTrivial(incoming_edge);
const auto &from_node =
isTrivial
? m_node_info_list[node_along_road_entering]
: m_node_info_list[m_compressed_edge_container.GetLastEdgeSourceID(
incoming_edge)];
const auto &via_node =
m_node_info_list[m_compressed_edge_container.GetLastEdgeTargetID(
incoming_edge)];
const auto &to_node =
m_node_info_list[m_compressed_edge_container.GetFirstEdgeTargetID(
turn.eid)];
const unsigned fixed_penalty = distance - edge_data1.distance;
lookup::PenaltyBlock penaltyblock = {
fixed_penalty, from_node.node_id, via_node.node_id, to_node.node_id};
edge_penalty_file.write(reinterpret_cast<const char *>(&penaltyblock),
sizeof(penaltyblock));
}
} }
} }
} }
} }
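A hedged sketch (not part of this commit) of how an external tool might read the penalty stream written above, assuming the POD layout of lookup::PenaltyBlock used by the writer; the from_id/via_id/to_id members appear elsewhere in this diff, and the header defining lookup::PenaltyBlock is not shown here:

#include <fstream>

namespace osrm
{
namespace extractor
{
// hypothetical reader for the per-turn penalty records written by GenerateEdgeExpandedEdges
inline void scanPenaltyFile(std::ifstream &edge_penalty_file)
{
    lookup::PenaltyBlock penaltyblock;
    while (edge_penalty_file.read(reinterpret_cast<char *>(&penaltyblock), sizeof(penaltyblock)))
    {
        // penaltyblock.from_id, penaltyblock.via_id and penaltyblock.to_id identify the turn;
        // the stored fixed penalty can then be inspected or replaced without re-running extraction
    }
}
} // namespace extractor
} // namespace osrm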
util::SimpleLogger().Write() << "Created " << entry_class_hash.size() << " entry classes and " util::Log() << "Created " << entry_class_hash.size() << " entry classes and "
<< bearing_class_hash.size() << " Bearing Classes"; << bearing_class_hash.size() << " Bearing Classes";
util::SimpleLogger().Write() << "Writing Turn Lane Data to File..."; util::Log() << "Writing Turn Lane Data to File...";
std::ofstream turn_lane_data_file(turn_lane_data_filename.c_str(), std::ios::binary); std::ofstream turn_lane_data_file(turn_lane_data_filename.c_str(), std::ios::binary);
std::vector<util::guidance::LaneTupleIdPair> lane_data(lane_data_map.size()); std::vector<util::guidance::LaneTupleIdPair> lane_data(lane_data_map.size());
// extract lane data sorted by ID // extract lane data sorted by ID
@ -630,7 +647,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
turn_lane_data_file.write(reinterpret_cast<const char *>(&lane_data[0]), turn_lane_data_file.write(reinterpret_cast<const char *>(&lane_data[0]),
sizeof(util::guidance::LaneTupleIdPair) * lane_data.size()); sizeof(util::guidance::LaneTupleIdPair) * lane_data.size());
util::SimpleLogger().Write() << "done."; util::Log() << "done.";
FlushVectorToStream(edge_data_file, original_edge_data_vector); FlushVectorToStream(edge_data_file, original_edge_data_vector);
@ -642,18 +659,15 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
edge_data_file.write(reinterpret_cast<const char *>(&length_prefix), sizeof(length_prefix)); edge_data_file.write(reinterpret_cast<const char *>(&length_prefix), sizeof(length_prefix));
util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() util::Log() << "Generated " << m_edge_based_node_list.size() << " edge based nodes";
<< " edge based nodes"; util::Log() << "Node-based graph contains " << node_based_edge_counter << " edges";
util::SimpleLogger().Write() << "Node-based graph contains " << node_based_edge_counter util::Log() << "Edge-expanded graph ...";
<< " edges"; util::Log() << " contains " << m_edge_based_edge_list.size() << " edges";
util::SimpleLogger().Write() << "Edge-expanded graph ..."; util::Log() << " skips " << restricted_turns_counter << " turns, "
util::SimpleLogger().Write() << " contains " << m_edge_based_edge_list.size() << " edges"; "defined by "
util::SimpleLogger().Write() << " skips " << restricted_turns_counter << " turns, " << m_restriction_map->size() << " restrictions";
"defined by " util::Log() << " skips " << skipped_uturns_counter << " U turns";
<< m_restriction_map->size() << " restrictions"; util::Log() << " skips " << skipped_barrier_turns_counter << " turns over barriers";
util::SimpleLogger().Write() << " skips " << skipped_uturns_counter << " U turns";
util::SimpleLogger().Write() << " skips " << skipped_barrier_turns_counter
<< " turns over barriers";
} }
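The entry and bearing classes above are both deduplicated with the same insert-or-lookup idiom: look the class up in a hash map and mint the next dense id on first sight. A generic sketch of that idiom (the helper itself is illustrative, not in the codebase; the key type must be hashable by the map):

#include <unordered_map>

// illustrative helper: ids are dense and assigned in discovery order
template <typename MapT, typename KeyT>
typename MapT::mapped_type getOrAssignClassId(MapT &hash, const KeyT &cls)
{
    const auto it = hash.find(cls);
    if (it != hash.end())
        return it->second;                                                // seen before: reuse its id
    const auto id = static_cast<typename MapT::mapped_type>(hash.size()); // next free id
    hash[cls] = id;
    return id;
}

// usage in the spirit of the code above (names as in the diff):
//   const auto entry_class_id = getOrAssignClassId(entry_class_hash, turn_classification.first);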
std::vector<util::guidance::BearingClass> EdgeBasedGraphFactory::GetBearingClasses() const std::vector<util::guidance::BearingClass> EdgeBasedGraphFactory::GetBearingClasses() const

File diff suppressed because it is too large

View File

@ -11,11 +11,13 @@
#include "extractor/raster_source.hpp" #include "extractor/raster_source.hpp"
#include "storage/io.hpp" #include "storage/io.hpp"
#include "storage/io.hpp" #include "storage/io.hpp"
#include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/graph_loader.hpp" #include "util/graph_loader.hpp"
#include "util/io.hpp" #include "util/io.hpp"
#include "util/log.hpp"
#include "util/name_table.hpp" #include "util/name_table.hpp"
#include "util/range_table.hpp" #include "util/range_table.hpp"
#include "util/simple_logger.hpp"
#include "util/timing_util.hpp" #include "util/timing_util.hpp"
#include "extractor/compressed_edge_container.hpp" #include "extractor/compressed_edge_container.hpp"
@ -119,12 +121,12 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
tbb::task_scheduler_init init(number_of_threads); tbb::task_scheduler_init init(number_of_threads);
{ {
util::SimpleLogger().Write() << "Input file: " << config.input_path.filename().string(); util::Log() << "Input file: " << config.input_path.filename().string();
if (!config.profile_path.empty()) if (!config.profile_path.empty())
{ {
util::SimpleLogger().Write() << "Profile: " << config.profile_path.filename().string(); util::Log() << "Profile: " << config.profile_path.filename().string();
} }
util::SimpleLogger().Write() << "Threads: " << number_of_threads; util::Log() << "Threads: " << number_of_threads;
ExtractionContainers extraction_containers; ExtractionContainers extraction_containers;
auto extractor_callbacks = std::make_unique<ExtractorCallbacks>(extraction_containers); auto extractor_callbacks = std::make_unique<ExtractorCallbacks>(extraction_containers);
@ -137,7 +139,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
unsigned number_of_ways = 0; unsigned number_of_ways = 0;
unsigned number_of_relations = 0; unsigned number_of_relations = 0;
util::SimpleLogger().Write() << "Parsing in progress.."; util::Log() << "Parsing in progress..";
TIMER_START(parsing); TIMER_START(parsing);
// setup raster sources // setup raster sources
@ -148,7 +150,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
{ {
generator = "unknown tool"; generator = "unknown tool";
} }
util::SimpleLogger().Write() << "input file generated by " << generator; util::Log() << "input file generated by " << generator;
// write .timestamp data file // write .timestamp data file
std::string timestamp = header.get("osmosis_replication_timestamp"); std::string timestamp = header.get("osmosis_replication_timestamp");
@ -156,7 +158,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
{ {
timestamp = "n/a"; timestamp = "n/a";
} }
util::SimpleLogger().Write() << "timestamp: " << timestamp; util::Log() << "timestamp: " << timestamp;
boost::filesystem::ofstream timestamp_out(config.timestamp_file_name); boost::filesystem::ofstream timestamp_out(config.timestamp_file_name);
timestamp_out.write(timestamp.c_str(), timestamp.length()); timestamp_out.write(timestamp.c_str(), timestamp.length());
@ -210,12 +212,10 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
} }
} }
TIMER_STOP(parsing); TIMER_STOP(parsing);
util::SimpleLogger().Write() << "Parsing finished after " << TIMER_SEC(parsing) util::Log() << "Parsing finished after " << TIMER_SEC(parsing) << " seconds";
<< " seconds";
util::SimpleLogger().Write() << "Raw input contains " << number_of_nodes << " nodes, " util::Log() << "Raw input contains " << number_of_nodes << " nodes, " << number_of_ways
<< number_of_ways << " ways, and " << number_of_relations << " ways, and " << number_of_relations << " relations";
<< " relations";
// take control over the turn lane map // take control over the turn lane map
turn_lane_map = extractor_callbacks->moveOutLaneDescriptionMap(); turn_lane_map = extractor_callbacks->moveOutLaneDescriptionMap();
@ -224,8 +224,8 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
if (extraction_containers.all_edges_list.empty()) if (extraction_containers.all_edges_list.empty())
{ {
util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting."; throw util::exception(std::string("There are no edges remaining after parsing.") +
return 1; SOURCE_REF);
} }
extraction_containers.PrepareData(scripting_environment, extraction_containers.PrepareData(scripting_environment,
@ -237,15 +237,14 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
scripting_environment.GetProfileProperties()); scripting_environment.GetProfileProperties());
TIMER_STOP(extracting); TIMER_STOP(extracting);
util::SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) util::Log() << "extraction finished after " << TIMER_SEC(extracting) << "s";
<< "s";
} }
{ {
// Transform the node-based graph that OSM is based on into an edge-based graph // Transform the node-based graph that OSM is based on into an edge-based graph
// that is better for routing. Every edge becomes a node, and every valid // that is better for routing. Every edge becomes a node, and every valid
// movement (e.g. turn from A->B, and B->A) becomes an edge // movement (e.g. turn from A->B, and B->A) becomes an edge
util::SimpleLogger().Write() << "Generating edge-expanded graph representation"; util::Log() << "Generating edge-expanded graph representation";
TIMER_START(expansion); TIMER_START(expansion);
@ -267,17 +266,16 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
TIMER_STOP(expansion); TIMER_STOP(expansion);
util::SimpleLogger().Write() << "Saving edge-based node weights to file."; util::Log() << "Saving edge-based node weights to file.";
TIMER_START(timer_write_node_weights); TIMER_START(timer_write_node_weights);
util::serializeVector(config.edge_based_node_weights_output_path, edge_based_node_weights); util::serializeVector(config.edge_based_node_weights_output_path, edge_based_node_weights);
TIMER_STOP(timer_write_node_weights); TIMER_STOP(timer_write_node_weights);
util::SimpleLogger().Write() << "Done writing. (" << TIMER_SEC(timer_write_node_weights) util::Log() << "Done writing. (" << TIMER_SEC(timer_write_node_weights) << ")";
<< ")";
util::SimpleLogger().Write() << "Computing strictly connected components ..."; util::Log() << "Computing strictly connected components ...";
FindComponents(max_edge_id, edge_based_edge_list, edge_based_node_list); FindComponents(max_edge_id, edge_based_edge_list, edge_based_node_list);
util::SimpleLogger().Write() << "Building r-tree ..."; util::Log() << "Building r-tree ...";
TIMER_START(rtree); TIMER_START(rtree);
BuildRTree(std::move(edge_based_node_list), BuildRTree(std::move(edge_based_node_list),
std::move(node_is_startpoint), std::move(node_is_startpoint),
@ -285,7 +283,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
TIMER_STOP(rtree); TIMER_STOP(rtree);
util::SimpleLogger().Write() << "Writing node map ..."; util::Log() << "Writing node map ...";
WriteNodeMapping(internal_to_external_node_map); WriteNodeMapping(internal_to_external_node_map);
WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list); WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
@ -295,10 +293,10 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
const auto edges_per_second = const auto edges_per_second =
static_cast<std::uint64_t>((max_edge_id + 1) / TIMER_SEC(expansion)); static_cast<std::uint64_t>((max_edge_id + 1) / TIMER_SEC(expansion));
util::SimpleLogger().Write() << "Expansion: " << nodes_per_second << " nodes/sec and " util::Log() << "Expansion: " << nodes_per_second << " nodes/sec and " << edges_per_second
<< edges_per_second << " edges/sec"; << " edges/sec";
util::SimpleLogger().Write() << "To prepare the data for routing, run: " util::Log() << "To prepare the data for routing, run: "
<< "./osrm-contract " << config.output_file_name << std::endl; << "./osrm-contract " << config.output_file_name;
} }
return 0; return 0;
@ -310,7 +308,7 @@ void Extractor::WriteProfileProperties(const std::string &output_path,
boost::filesystem::ofstream out_stream(output_path); boost::filesystem::ofstream out_stream(output_path);
if (!out_stream) if (!out_stream)
{ {
throw util::exception("Could not open " + output_path + " for writing."); throw util::exception("Could not open " + output_path + " for writing." + SOURCE_REF);
} }
out_stream.write(reinterpret_cast<const char *>(&properties), sizeof(properties)); out_stream.write(reinterpret_cast<const char *>(&properties), sizeof(properties));
@ -406,7 +404,7 @@ std::shared_ptr<RestrictionMap> Extractor::LoadRestrictionMap()
util::loadRestrictionsFromFile(file_reader, restriction_list); util::loadRestrictionsFromFile(file_reader, restriction_list);
util::SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions."; util::Log() << " - " << restriction_list.size() << " restrictions.";
return std::make_shared<RestrictionMap>(restriction_list); return std::make_shared<RestrictionMap>(restriction_list);
} }
@ -428,16 +426,16 @@ Extractor::LoadNodeBasedGraph(std::unordered_set<NodeID> &barriers,
NodeID number_of_node_based_nodes = util::loadNodesFromFile( NodeID number_of_node_based_nodes = util::loadNodesFromFile(
file_reader, barriers_iter, traffic_signals_iter, internal_to_external_node_map); file_reader, barriers_iter, traffic_signals_iter, internal_to_external_node_map);
util::SimpleLogger().Write() << " - " << barriers.size() << " bollard nodes, " util::Log() << " - " << barriers.size() << " bollard nodes, " << traffic_signals.size()
<< traffic_signals.size() << " traffic lights"; << " traffic lights";
std::vector<NodeBasedEdge> edge_list; std::vector<NodeBasedEdge> edge_list;
util::loadEdgesFromFile(file_reader, edge_list); util::loadEdgesFromFile(file_reader, edge_list);
if (edge_list.empty()) if (edge_list.empty())
{ {
util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting."; throw util::exception("Node-based-graph (" + config.output_file_name +
return std::shared_ptr<util::NodeBasedDynamicGraph>(); ") contains no edges." + SOURCE_REF);
} }
return util::NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list); return util::NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list);
@ -541,9 +539,9 @@ void Extractor::BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
std::vector<bool> node_is_startpoint, std::vector<bool> node_is_startpoint,
const std::vector<QueryNode> &internal_to_external_node_map) const std::vector<QueryNode> &internal_to_external_node_map)
{ {
util::SimpleLogger().Write() << "constructing r-tree of " << node_based_edge_list.size() util::Log() << "constructing r-tree of " << node_based_edge_list.size()
<< " edge elements build on-top of " << " edge elements build on-top of " << internal_to_external_node_map.size()
<< internal_to_external_node_map.size() << " coordinates"; << " coordinates";
BOOST_ASSERT(node_is_startpoint.size() == node_based_edge_list.size()); BOOST_ASSERT(node_is_startpoint.size() == node_based_edge_list.size());
@ -564,7 +562,8 @@ void Extractor::BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
if (new_size == 0) if (new_size == 0)
{ {
throw util::exception("There are no snappable edges left after processing. Are you " throw util::exception("There are no snappable edges left after processing. Are you "
"setting travel modes correctly in the profile? Cannot continue."); "setting travel modes correctly in the profile? Cannot continue." +
SOURCE_REF);
} }
node_based_edge_list.resize(new_size); node_based_edge_list.resize(new_size);
@ -575,8 +574,7 @@ void Extractor::BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
internal_to_external_node_map); internal_to_external_node_map);
TIMER_STOP(construction); TIMER_STOP(construction);
util::SimpleLogger().Write() << "finished r-tree construction in " << TIMER_SEC(construction) util::Log() << "finished r-tree construction in " << TIMER_SEC(construction) << " seconds";
<< " seconds";
} }
void Extractor::WriteEdgeBasedGraph( void Extractor::WriteEdgeBasedGraph(
@ -590,8 +588,7 @@ void Extractor::WriteEdgeBasedGraph(
const util::FingerPrint fingerprint = util::FingerPrint::GetValid(); const util::FingerPrint fingerprint = util::FingerPrint::GetValid();
file_out_stream.write((char *)&fingerprint, sizeof(util::FingerPrint)); file_out_stream.write((char *)&fingerprint, sizeof(util::FingerPrint));
util::SimpleLogger().Write() << "[extractor] Writing edge-based-graph edges ... " util::Log() << "Writing edge-based-graph edges ... " << std::flush;
<< std::flush;
TIMER_START(write_edges); TIMER_START(write_edges);
std::uint64_t number_of_used_edges = edge_based_edge_list.size(); std::uint64_t number_of_used_edges = edge_based_edge_list.size();
@ -604,9 +601,9 @@ void Extractor::WriteEdgeBasedGraph(
} }
TIMER_STOP(write_edges); TIMER_STOP(write_edges);
util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl; util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s";
util::SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges"; util::Log() << "Processed " << number_of_used_edges << " edges";
} }
void Extractor::WriteIntersectionClassificationData( void Extractor::WriteIntersectionClassificationData(
@ -618,12 +615,11 @@ void Extractor::WriteIntersectionClassificationData(
std::ofstream file_out_stream(output_file_name.c_str(), std::ios::binary); std::ofstream file_out_stream(output_file_name.c_str(), std::ios::binary);
if (!file_out_stream) if (!file_out_stream)
{ {
util::SimpleLogger().Write(logWARNING) << "Failed to open " << output_file_name util::Log(logERROR) << "Failed to open " << output_file_name << " for writing";
<< " for writing";
return; return;
} }
util::SimpleLogger().Write() << "Writing Intersection Classification Data"; util::Log() << "Writing Intersection Classification Data";
TIMER_START(write_edges); TIMER_START(write_edges);
util::writeFingerprint(file_out_stream); util::writeFingerprint(file_out_stream);
util::serializeVector(file_out_stream, node_based_intersection_classes); util::serializeVector(file_out_stream, node_based_intersection_classes);
@ -652,16 +648,15 @@ void Extractor::WriteIntersectionClassificationData(
if (!static_cast<bool>(file_out_stream)) if (!static_cast<bool>(file_out_stream))
{ {
throw util::exception("Failed to write to " + output_file_name + "."); throw util::exception("Failed to write to " + output_file_name + "." + SOURCE_REF);
} }
util::serializeVector(file_out_stream, entry_classes); util::serializeVector(file_out_stream, entry_classes);
TIMER_STOP(write_edges); TIMER_STOP(write_edges);
util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s for " util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s for "
<< node_based_intersection_classes.size() << " Indices into " << node_based_intersection_classes.size() << " Indices into "
<< bearing_classes.size() << " bearing classes and " << bearing_classes.size() << " bearing classes and " << entry_classes.size()
<< entry_classes.size() << " entry classes and " << total_bearings << " entry classes and " << total_bearings << " bearing values.";
<< " bearing values." << std::endl;
} }
void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const
@ -671,27 +666,26 @@ void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const
std::vector<guidance::TurnLaneType::Mask> turn_lane_masks; std::vector<guidance::TurnLaneType::Mask> turn_lane_masks;
std::tie(turn_lane_offsets, turn_lane_masks) = transformTurnLaneMapIntoArrays(turn_lane_map); std::tie(turn_lane_offsets, turn_lane_masks) = transformTurnLaneMapIntoArrays(turn_lane_map);
util::SimpleLogger().Write() << "Writing turn lane masks..."; util::Log() << "Writing turn lane masks...";
TIMER_START(turn_lane_timer); TIMER_START(turn_lane_timer);
std::ofstream ofs(turn_lane_file, std::ios::binary); std::ofstream ofs(turn_lane_file, std::ios::binary);
if (!ofs) if (!ofs)
throw osrm::util::exception("Failed to open " + turn_lane_file + " for writing."); throw osrm::util::exception("Failed to open " + turn_lane_file + " for writing." +
SOURCE_REF);
if (!util::serializeVector(ofs, turn_lane_offsets)) if (!util::serializeVector(ofs, turn_lane_offsets))
{ {
util::SimpleLogger().Write(logWARNING) << "Error while writing."; throw util::exception("Error while writing to " + turn_lane_file + SOURCE_REF);
return;
} }
if (!util::serializeVector(ofs, turn_lane_masks)) if (!util::serializeVector(ofs, turn_lane_masks))
{ {
util::SimpleLogger().Write(logWARNING) << "Error while writing."; throw util::exception("Error while writing to " + turn_lane_file + SOURCE_REF);
return;
} }
TIMER_STOP(turn_lane_timer); TIMER_STOP(turn_lane_timer);
util::SimpleLogger().Write() << "done (" << TIMER_SEC(turn_lane_timer) << ")"; util::Log() << "done (" << TIMER_SEC(turn_lane_timer) << ")";
} }
} // namespace extractor } // namespace extractor
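With warn-and-return replaced by exceptions, error handling concentrates at the tool entry points. A hypothetical sketch (not part of this commit) of how a caller such as osrm-extract can surface these failures; the header paths and the wrapper function are assumptions:

#include "extractor/extractor.hpp"
#include "extractor/scripting_environment.hpp"
#include "util/exception.hpp"
#include "util/log.hpp"

#include <cstdlib>

namespace osrm
{
int runExtractionGuarded(extractor::Extractor &extractor,
                         extractor::ScriptingEnvironment &scripting_environment)
{
    try
    {
        return extractor.run(scripting_environment);
    }
    catch (const util::exception &e)
    {
        // SOURCE_REF baked into the message points at the throw site
        util::Log(logERROR) << e.what();
        return EXIT_FAILURE;
    }
}
} // namespace osrm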

View File

@ -8,7 +8,7 @@
#include "util/for_each_pair.hpp" #include "util/for_each_pair.hpp"
#include "util/guidance/turn_lanes.hpp" #include "util/guidance/turn_lanes.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <boost/numeric/conversion/cast.hpp> #include <boost/numeric/conversion/cast.hpp>
#include <boost/optional/optional.hpp> #include <boost/optional/optional.hpp>
@ -62,7 +62,7 @@ void ExtractorCallbacks::ProcessRestriction(
if (restriction) if (restriction)
{ {
external_memory.restrictions_list.push_back(restriction.get()); external_memory.restrictions_list.push_back(restriction.get());
// util::SimpleLogger().Write() << "from: " << restriction.get().restriction.from.node << // util::Log() << "from: " << restriction.get().restriction.from.node <<
// ",via: " << restriction.get().restriction.via.node << // ",via: " << restriction.get().restriction.via.node <<
// ", to: " << restriction.get().restriction.to.node << // ", to: " << restriction.get().restriction.to.node <<
// ", only: " << (restriction.get().restriction.flags.is_only ? // ", only: " << (restriction.get().restriction.flags.is_only ?
@ -96,8 +96,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
if (std::numeric_limits<decltype(input_way.id())>::max() == input_way.id()) if (std::numeric_limits<decltype(input_way.id())>::max() == input_way.id())
{ {
util::SimpleLogger().Write(logDEBUG) << "found bogus way with id: " << input_way.id() util::Log(logDEBUG) << "found bogus way with id: " << input_way.id() << " of size "
<< " of size " << input_way.nodes().size(); << input_way.nodes().size();
return; return;
} }
@ -135,8 +135,7 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID && if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID &&
backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID) backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID)
{ {
util::SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " util::Log(logDEBUG) << "found way with bogus speed, id: " << input_way.id();
<< input_way.id();
return; return;
} }
@ -196,8 +195,7 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
if (translated_mask == TurnLaneType::empty) if (translated_mask == TurnLaneType::empty)
{ {
// if we have unsupported tags, don't handle them // if we have unsupported tags, don't handle them
util::SimpleLogger().Write(logDEBUG) << "Unsupported lane tag found: \"" util::Log(logDEBUG) << "Unsupported lane tag found: \"" << *token_itr << "\"";
<< *token_itr << "\"";
return {}; return {};
} }

View File

@ -6,7 +6,7 @@
#include "util/node_based_graph.hpp" #include "util/node_based_graph.hpp"
#include "util/percent.hpp" #include "util/percent.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
namespace osrm namespace osrm
{ {
@ -22,175 +22,185 @@ void GraphCompressor::Compress(const std::unordered_set<NodeID> &barrier_nodes,
const unsigned original_number_of_nodes = graph.GetNumberOfNodes(); const unsigned original_number_of_nodes = graph.GetNumberOfNodes();
const unsigned original_number_of_edges = graph.GetNumberOfEdges(); const unsigned original_number_of_edges = graph.GetNumberOfEdges();
util::Percent progress(original_number_of_nodes);
for (const NodeID node_v : util::irange(0u, original_number_of_nodes))
{ {
progress.PrintStatus(node_v); util::UnbufferedLog log;
util::Percent progress(log, original_number_of_nodes);
// only contract degree 2 vertices for (const NodeID node_v : util::irange(0u, original_number_of_nodes))
if (2 != graph.GetOutDegree(node_v))
{ {
continue; progress.PrintStatus(node_v);
}
// don't contract barrier node // only contract degree 2 vertices
if (barrier_nodes.end() != barrier_nodes.find(node_v)) if (2 != graph.GetOutDegree(node_v))
{ {
continue;
}
// check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
if (restriction_map.IsViaNode(node_v))
{
continue;
}
// reverse_e2 forward_e2
// u <---------- v -----------> w
// ----------> <-----------
// forward_e1 reverse_e1
//
// Will be compressed to:
//
// reverse_e1
// u <---------- w
// ---------->
// forward_e1
//
// If the edges are compatible.
const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) && forward_e2 < graph.EndEdges(node_v));
const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) && reverse_e2 < graph.EndEdges(node_v));
const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);
const NodeID node_w = graph.GetTarget(forward_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_w);
BOOST_ASSERT(node_v != node_w);
const NodeID node_u = graph.GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_u);
BOOST_ASSERT(node_u != node_v);
const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));
const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);
if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
{
continue;
}
// this case can happen if two ways with different names overlap
if (fwd_edge_data1.name_id != rev_edge_data1.name_id ||
fwd_edge_data2.name_id != rev_edge_data2.name_id)
{
continue;
}
if (fwd_edge_data1.CanCombineWith(fwd_edge_data2) &&
rev_edge_data1.CanCombineWith(rev_edge_data2))
{
BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id ==
graph.GetEdgeData(reverse_e1).name_id);
BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id ==
graph.GetEdgeData(reverse_e2).name_id);
// Do not compress edge if it crosses a traffic signal.
// This can't be done in CanCombineWith, because we only store the
// traffic signals in the `traffic_lights` list, which EdgeData
// doesn't have access to.
const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end();
if (has_node_penalty)
continue; continue;
}
// Get distances before graph is modified // don't contract barrier node
const int forward_weight1 = graph.GetEdgeData(forward_e1).distance; if (barrier_nodes.end() != barrier_nodes.find(node_v))
const int forward_weight2 = graph.GetEdgeData(forward_e2).distance; {
continue;
}
BOOST_ASSERT(0 != forward_weight1); // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
BOOST_ASSERT(0 != forward_weight2); if (restriction_map.IsViaNode(node_v))
{
continue;
}
const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance; // reverse_e2 forward_e2
const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance; // u <---------- v -----------> w
// ----------> <-----------
// forward_e1 reverse_e1
//
// Will be compressed to:
//
// reverse_e1
// u <---------- w
// ---------->
// forward_e1
//
// If the edges are compatible.
const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) &&
forward_e2 < graph.EndEdges(node_v));
const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) &&
reverse_e2 < graph.EndEdges(node_v));
BOOST_ASSERT(0 != reverse_weight1); const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
BOOST_ASSERT(0 != reverse_weight2); const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);
// add weight of e2's to e1 const NodeID node_w = graph.GetTarget(forward_e2);
graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance; BOOST_ASSERT(SPECIAL_NODEID != node_w);
graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance; BOOST_ASSERT(node_v != node_w);
const NodeID node_u = graph.GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_u);
BOOST_ASSERT(node_u != node_v);
// extend e1's to targets of e2's const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
graph.SetTarget(forward_e1, node_w); BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
graph.SetTarget(reverse_e1, node_u); BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));
/* const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
* Remember Lane Data for compressed parts. This handles scenarios where lane-data is const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);
* only kept up until a traffic light.
*
* | |
* ---------------- |
* -^ | |
* ----------- |
* -v | |
* --------------- |
* | |
*
* u ------- v ---- w
*
                 * Since the edge is compressible, we can transfer:
* "left|right" (uv) and "" (uw) into a string with "left|right" (uw) for the compressed
* edge.
* Doing so, we might mess up the point from where the lanes are shown. It should be
* reasonable, since the announcements have to come early anyhow. So there is a
* potential danger in here, but it saves us from adding a lot of additional edges for
                 * turn-lanes. Without this, we would have to treat any turn-lane beginning/ending just
* like a barrier.
*/
const auto selectLaneID = [](const LaneDescriptionID front,
const LaneDescriptionID back) {
// A lane has tags: u - (front) - v - (back) - w
// During contraction, we keep only one of the tags. Usually the one closer to the
                        // intersection is preferred. If it's empty, however, we keep the non-empty one
if (back == INVALID_LANE_DESCRIPTIONID)
return front;
return back;
};
graph.GetEdgeData(forward_e1).lane_description_id =
selectLaneID(graph.GetEdgeData(forward_e1).lane_description_id,
fwd_edge_data2.lane_description_id);
graph.GetEdgeData(reverse_e1).lane_description_id =
selectLaneID(graph.GetEdgeData(reverse_e1).lane_description_id,
rev_edge_data2.lane_description_id);
// remove e2's (if bidir, otherwise only one) if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
graph.DeleteEdge(node_v, forward_e2); {
graph.DeleteEdge(node_v, reverse_e2); continue;
}
// update any involved turn restrictions // this case can happen if two ways with different names overlap
restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w); if (fwd_edge_data1.name_id != rev_edge_data1.name_id ||
restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph); fwd_edge_data2.name_id != rev_edge_data2.name_id)
{
continue;
}
restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u); if (fwd_edge_data1.CanCombineWith(fwd_edge_data2) &&
restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph); rev_edge_data1.CanCombineWith(rev_edge_data2))
{
BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id ==
graph.GetEdgeData(reverse_e1).name_id);
BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id ==
graph.GetEdgeData(reverse_e2).name_id);
// store compressed geometry in container // Do not compress edge if it crosses a traffic signal.
                geometry_compressor.CompressEdge(                                      // This can't be done in CanCombineWith, because we only store the
forward_e1, forward_e2, node_v, node_w, forward_weight1, forward_weight2); // traffic signals in the `traffic_lights` list, which EdgeData
geometry_compressor.CompressEdge( // doesn't have access to.
reverse_e1, reverse_e2, node_v, node_u, reverse_weight1, reverse_weight2); const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end();
if (has_node_penalty)
continue;
// Get distances before graph is modified
const int forward_weight1 = graph.GetEdgeData(forward_e1).distance;
const int forward_weight2 = graph.GetEdgeData(forward_e2).distance;
BOOST_ASSERT(0 != forward_weight1);
BOOST_ASSERT(0 != forward_weight2);
const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance;
const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance;
BOOST_ASSERT(0 != reverse_weight1);
BOOST_ASSERT(0 != reverse_weight2);
// add weight of e2's to e1
graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;
// extend e1's to targets of e2's
graph.SetTarget(forward_e1, node_w);
graph.SetTarget(reverse_e1, node_u);
/*
* Remember Lane Data for compressed parts. This handles scenarios where lane-data
* is
* only kept up until a traffic light.
*
* | |
* ---------------- |
* -^ | |
* ----------- |
* -v | |
* --------------- |
* | |
*
* u ------- v ---- w
*
                 * Since the edge is compressible, we can transfer:
* "left|right" (uv) and "" (uw) into a string with "left|right" (uw) for the
* compressed
* edge.
* Doing so, we might mess up the point from where the lanes are shown. It should be
* reasonable, since the announcements have to come early anyhow. So there is a
* potential danger in here, but it saves us from adding a lot of additional edges
* for
                 * turn-lanes. Without this, we would have to treat any turn-lane beginning/ending
* just
* like a barrier.
*/
const auto selectLaneID = [](const LaneDescriptionID front,
const LaneDescriptionID back) {
// A lane has tags: u - (front) - v - (back) - w
// During contraction, we keep only one of the tags. Usually the one closer to
// the
                    // intersection is preferred. If it's empty, however, we keep the non-empty one
if (back == INVALID_LANE_DESCRIPTIONID)
return front;
return back;
};
graph.GetEdgeData(forward_e1).lane_description_id =
selectLaneID(graph.GetEdgeData(forward_e1).lane_description_id,
fwd_edge_data2.lane_description_id);
graph.GetEdgeData(reverse_e1).lane_description_id =
selectLaneID(graph.GetEdgeData(reverse_e1).lane_description_id,
rev_edge_data2.lane_description_id);
// remove e2's (if bidir, otherwise only one)
graph.DeleteEdge(node_v, forward_e2);
graph.DeleteEdge(node_v, reverse_e2);
// update any involved turn restrictions
restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w);
restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph);
restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u);
restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph);
// store compressed geometry in container
geometry_compressor.CompressEdge(
forward_e1, forward_e2, node_v, node_w, forward_weight1, forward_weight2);
geometry_compressor.CompressEdge(
reverse_e1, reverse_e2, node_v, node_u, reverse_weight1, reverse_weight2);
}
} }
} }
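The compression step above accumulates the edge weights, retargets e1 onto the far nodes, and keeps one lane-description tag per direction via selectLaneID. The tag rule is small enough to check in isolation; a minimal standalone sketch, using stand-in typedefs for LaneDescriptionID and INVALID_LANE_DESCRIPTIONID (the real ones live in the OSRM headers and may differ), is:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Stand-ins for the real typedefs from the OSRM headers.
    using LaneDescriptionID = std::uint16_t;
    constexpr LaneDescriptionID INVALID_LANE_DESCRIPTIONID =
        std::numeric_limits<LaneDescriptionID>::max();

    // Same rule as the selectLaneID lambda above: prefer the tag nearer the
    // intersection (back); keep front only when back is empty.
    LaneDescriptionID selectLaneID(const LaneDescriptionID front, const LaneDescriptionID back)
    {
        return back == INVALID_LANE_DESCRIPTIONID ? front : back;
    }

    int main()
    {
        const LaneDescriptionID left_right = 7; // e.g. "left|right" tagged on u -> v
        assert(selectLaneID(left_right, INVALID_LANE_DESCRIPTIONID) == left_right); // uv tagged, vw empty
        assert(selectLaneID(INVALID_LANE_DESCRIPTIONID, left_right) == left_right); // vw tagged wins
        assert(selectLaneID(3, left_right) == left_right);                          // back is preferred
    }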
@ -226,10 +236,8 @@ void GraphCompressor::PrintStatistics(unsigned original_number_of_nodes,
new_edge_count += (graph.EndEdges(i) - graph.BeginEdges(i)); new_edge_count += (graph.EndEdges(i) - graph.BeginEdges(i));
} }
} }
util::SimpleLogger().Write() << "Node compression ratio: " util::Log() << "Node compression ratio: " << new_node_count / (double)original_number_of_nodes;
<< new_node_count / (double)original_number_of_nodes; util::Log() << "Edge compression ratio: " << new_edge_count / (double)original_number_of_edges;
util::SimpleLogger().Write() << "Edge compression ratio: "
<< new_edge_count / (double)original_number_of_edges;
} }
} }
} }
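The remainder of this diff repeats the same logging substitution: SimpleLogger().Write(level) call sites become util::Log(level) streams from util/log.hpp. A rough usage sketch of the new interface, inferred only from the call sites in this commit (the class itself is defined in util/log.hpp and is not shown here), is:

    #include <cstddef>

    #include "util/log.hpp" // replaces util/simple_logger.hpp

    void reportProgress(std::size_t handled, std::size_t total)
    {
        // One Log object per message; judging by the dropped std::endl at the call
        // sites, the message is presumably flushed when the object is destroyed.
        osrm::util::Log() << "handled " << handled << " of " << total << " cases";

        // The severity moves from Write(level) into the constructor; the level
        // names are written unqualified here, mirroring the call sites above.
        if (handled < total)
            osrm::util::Log(logWARNING) << (total - handled) << " cases were skipped";
    }

As before, LogPolicy::GetInstance().Unmute() still gates whether anything is printed at all, which is why the tool entry points further down keep that call.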

View File

@ -4,7 +4,7 @@
#include "util/coordinate_calculation.hpp" #include "util/coordinate_calculation.hpp"
#include "util/guidance/toolkit.hpp" #include "util/guidance/toolkit.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <algorithm> #include <algorithm>
#include <cstddef> #include <cstddef>

View File

@ -4,7 +4,7 @@
#include "extractor/guidance/toolkit.hpp" #include "extractor/guidance/toolkit.hpp"
#include "util/guidance/toolkit.hpp" #include "util/guidance/toolkit.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <limits> #include <limits>
#include <utility> #include <utility>
@ -197,9 +197,9 @@ Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in
else if (countValid(intersection) > 0) // check whether turns exist at all else if (countValid(intersection) > 0) // check whether turns exist at all
{ {
// FALLBACK, this should hopefully never be reached // FALLBACK, this should hopefully never be reached
util::SimpleLogger().Write(logDEBUG) util::Log(logDEBUG) << "Fallback reached from motorway, no continue angle, "
<< "Fallback reached from motorway, no continue angle, " << intersection.size() << intersection.size() << " roads, " << countValid(intersection)
<< " roads, " << countValid(intersection) << " valid ones."; << " valid ones.";
return fallback(std::move(intersection)); return fallback(std::move(intersection));
} }
} }
@ -275,7 +275,7 @@ Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in
via_eid, via_eid,
isThroughStreet(1, intersection), isThroughStreet(1, intersection),
intersection[1]); intersection[1]);
util::SimpleLogger().Write(logDEBUG) << "Disabled U-Turn on a freeway"; util::Log(logDEBUG) << "Disabled U-Turn on a freeway";
intersection[0].entry_allowed = false; // UTURN on the freeway intersection[0].entry_allowed = false; // UTURN on the freeway
} }
else if (exiting_motorways == 2) else if (exiting_motorways == 2)
@ -334,8 +334,8 @@ Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in
} }
else else
{ {
util::SimpleLogger().Write(logDEBUG) << "Found motorway junction with more than " util::Log(logDEBUG) << "Found motorway junction with more than "
"2 exiting motorways or additional ramps"; "2 exiting motorways or additional ramps";
return fallback(std::move(intersection)); return fallback(std::move(intersection));
} }
} // done for more than one highway exit } // done for more than one highway exit
@ -489,9 +489,8 @@ Intersection MotorwayHandler::fromRamp(const EdgeID via_eid, Intersection inters
} }
else else
    { // FALLBACK, hopefully this should never be reached                                 { // FALLBACK, hopefully this should never be reached
util::SimpleLogger().Write(logDEBUG) << "Reached fallback on motorway ramp with " util::Log(logDEBUG) << "Reached fallback on motorway ramp with " << intersection.size()
<< intersection.size() << " roads and " << " roads and " << countValid(intersection) << " valid turns.";
<< countValid(intersection) << " valid turns.";
return fallback(std::move(intersection)); return fallback(std::move(intersection));
} }
return intersection; return intersection;

View File

@ -4,7 +4,7 @@
#include "util/coordinate_calculation.hpp" #include "util/coordinate_calculation.hpp"
#include "util/guidance/toolkit.hpp" #include "util/guidance/toolkit.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>

View File

@ -5,7 +5,7 @@
#include "util/coordinate.hpp" #include "util/coordinate.hpp"
#include "util/coordinate_calculation.hpp" #include "util/coordinate_calculation.hpp"
#include "util/guidance/toolkit.hpp" #include "util/guidance/toolkit.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <cstddef> #include <cstddef>
#include <iomanip> #include <iomanip>

View File

@ -1,6 +1,6 @@
#include "extractor/guidance/turn_classification.hpp" #include "extractor/guidance/turn_classification.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>

View File

@ -1,6 +1,6 @@
#include "extractor/guidance/turn_lane_augmentation.hpp" #include "extractor/guidance/turn_lane_augmentation.hpp"
#include "extractor/guidance/turn_lane_types.hpp" #include "extractor/guidance/turn_lane_types.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <algorithm> #include <algorithm>
#include <boost/assert.hpp> #include <boost/assert.hpp>
@ -104,7 +104,7 @@ LaneDataVector augmentMultiple(const std::size_t none_index,
} }
} }
// this should, theoretically, never be reached // this should, theoretically, never be reached
util::SimpleLogger().Write(logWARNING) << "Failed lane assignment. Reached bad situation."; util::Log(logWARNING) << "Failed lane assignment. Reached bad situation.";
return std::make_pair(std::size_t{0}, std::size_t{0}); return std::make_pair(std::size_t{0}, std::size_t{0});
}(); }();
for (auto intersection_index = range.first; intersection_index < range.second; for (auto intersection_index = range.first; intersection_index < range.second;

View File

@ -3,7 +3,7 @@
#include "extractor/guidance/turn_discovery.hpp" #include "extractor/guidance/turn_discovery.hpp"
#include "extractor/guidance/turn_lane_augmentation.hpp" #include "extractor/guidance/turn_lane_augmentation.hpp"
#include "extractor/guidance/turn_lane_matcher.hpp" #include "extractor/guidance/turn_lane_matcher.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
#include <cstddef> #include <cstddef>
@ -46,8 +46,8 @@ TurnLaneHandler::TurnLaneHandler(const util::NodeBasedDynamicGraph &node_based_g
TurnLaneHandler::~TurnLaneHandler() TurnLaneHandler::~TurnLaneHandler()
{ {
std::cout << "Handled: " << count_handled << " of " << count_called util::Log() << "Handled: " << count_handled << " of " << count_called
<< " lanes: " << (double)(count_handled * 100) / (count_called) << " %." << std::endl; << " lanes: " << (double)(count_handled * 100) / (count_called) << " %.";
} }
/* /*

View File

@ -1,7 +1,10 @@
#include "extractor/raster_source.hpp" #include "extractor/raster_source.hpp"
#include "util/simple_logger.hpp" #include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/log.hpp"
#include "util/timing_util.hpp" #include "util/timing_util.hpp"
#include "util/typedefs.hpp"
#include <cmath> #include <cmath>
@ -92,20 +95,20 @@ int SourceContainer::LoadRasterSource(const std::string &path_string,
const auto itr = LoadedSourcePaths.find(path_string); const auto itr = LoadedSourcePaths.find(path_string);
if (itr != LoadedSourcePaths.end()) if (itr != LoadedSourcePaths.end())
{ {
util::SimpleLogger().Write() << "[source loader] Already loaded source '" << path_string util::Log() << "[source loader] Already loaded source '" << path_string << "' at source_id "
<< "' at source_id " << itr->second; << itr->second;
return itr->second; return itr->second;
} }
int source_id = static_cast<int>(LoadedSources.size()); int source_id = static_cast<int>(LoadedSources.size());
util::SimpleLogger().Write() << "[source loader] Loading from " << path_string << " ... "; util::Log() << "[source loader] Loading from " << path_string << " ... ";
TIMER_START(loading_source); TIMER_START(loading_source);
boost::filesystem::path filepath(path_string); boost::filesystem::path filepath(path_string);
if (!boost::filesystem::exists(filepath)) if (!boost::filesystem::exists(filepath))
{ {
throw util::exception("error reading: no such path"); throw util::exception(path_string + " does not exist" + SOURCE_REF);
} }
RasterGrid rasterData{filepath, ncols, nrows}; RasterGrid rasterData{filepath, ncols, nrows};
@ -115,8 +118,7 @@ int SourceContainer::LoadRasterSource(const std::string &path_string,
LoadedSourcePaths.emplace(path_string, source_id); LoadedSourcePaths.emplace(path_string, source_id);
LoadedSources.push_back(std::move(source)); LoadedSources.push_back(std::move(source));
util::SimpleLogger().Write() << "[source loader] ok, after " << TIMER_SEC(loading_source) util::Log() << "[source loader] ok, after " << TIMER_SEC(loading_source) << "s";
<< "s";
return source_id; return source_id;
} }
@ -126,7 +128,9 @@ RasterDatum SourceContainer::GetRasterDataFromSource(unsigned int source_id, dou
{ {
if (LoadedSources.size() < source_id + 1) if (LoadedSources.size() < source_id + 1)
{ {
throw util::exception("error reading: no such loaded source"); throw util::exception("Attempted to access source " + std::to_string(source_id) +
", but there are only " + std::to_string(LoadedSources.size()) +
" loaded" + SOURCE_REF);
} }
BOOST_ASSERT(lat < 90); BOOST_ASSERT(lat < 90);
@ -145,7 +149,9 @@ SourceContainer::GetRasterInterpolateFromSource(unsigned int source_id, double l
{ {
if (LoadedSources.size() < source_id + 1) if (LoadedSources.size() < source_id + 1)
{ {
throw util::exception("error reading: no such loaded source"); throw util::exception("Attempted to access source " + std::to_string(source_id) +
", but there are only " + std::to_string(LoadedSources.size()) +
" loaded" + SOURCE_REF);
} }
BOOST_ASSERT(lat < 90); BOOST_ASSERT(lat < 90);
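The improved messages in this file append SOURCE_REF, which comes from the newly included util/exception_utils.hpp. Its definition is not part of this diff; one plausible shape, assuming it is a file-and-line suffix that concatenates onto both std::string and string literals (which is all the call sites require), would be:

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for SOURCE_REF from util/exception_utils.hpp; the real
    // macro may differ, but it must yield something string-concatenable, because
    // the call sites write both `"literal" + SOURCE_REF` and `some_string + SOURCE_REF`.
    #define SOURCE_REF (std::string(" (at ") + __FILE__ + ":" + std::to_string(__LINE__) + ")")

    void requireSource(std::size_t source_id, std::size_t loaded)
    {
        if (loaded < source_id + 1)
            throw std::runtime_error("Attempted to access source " + std::to_string(source_id) +
                                     ", but there are only " + std::to_string(loaded) + " loaded" +
                                     SOURCE_REF);
    }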

View File

@ -4,7 +4,7 @@
#include "extractor/external_memory_node.hpp" #include "extractor/external_memory_node.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <boost/algorithm/string.hpp> #include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp> #include <boost/algorithm/string/predicate.hpp>
@ -33,15 +33,15 @@ RestrictionParser::RestrictionParser(ScriptingEnvironment &scripting_environment
const unsigned count = restrictions.size(); const unsigned count = restrictions.size();
if (count > 0) if (count > 0)
{ {
util::SimpleLogger().Write() << "Found " << count << " turn restriction tags:"; util::Log() << "Found " << count << " turn restriction tags:";
for (const std::string &str : restrictions) for (const std::string &str : restrictions)
{ {
util::SimpleLogger().Write() << " " << str; util::Log() << " " << str;
} }
} }
else else
{ {
util::SimpleLogger().Write() << "Found no turn restriction tags"; util::Log() << "Found no turn restriction tags";
} }
} }
} }

View File

@ -9,8 +9,9 @@
#include "extractor/raster_source.hpp" #include "extractor/raster_source.hpp"
#include "extractor/restriction_parser.hpp" #include "extractor/restriction_parser.hpp"
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/log.hpp"
#include "util/lua_util.hpp" #include "util/lua_util.hpp"
#include "util/simple_logger.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
#include <luabind/iterator_policy.hpp> #include <luabind/iterator_policy.hpp>
@ -65,14 +66,14 @@ int luaErrorCallback(lua_State *state)
std::string error_msg = lua_tostring(state, -1); std::string error_msg = lua_tostring(state, -1);
std::ostringstream error_stream; std::ostringstream error_stream;
error_stream << error_msg; error_stream << error_msg;
throw util::exception("ERROR occurred in profile script:\n" + error_stream.str()); throw util::exception("ERROR occurred in profile script:\n" + error_stream.str() + SOURCE_REF);
} }
} }
LuaScriptingEnvironment::LuaScriptingEnvironment(const std::string &file_name) LuaScriptingEnvironment::LuaScriptingEnvironment(const std::string &file_name)
: file_name(file_name) : file_name(file_name)
{ {
util::SimpleLogger().Write() << "Using script " << file_name; util::Log() << "Using script " << file_name;
} }
void LuaScriptingEnvironment::InitContext(LuaScriptingContext &context) void LuaScriptingEnvironment::InitContext(LuaScriptingContext &context)
@ -265,7 +266,8 @@ void LuaScriptingEnvironment::InitContext(LuaScriptingContext &context)
luabind::object error_msg(luabind::from_stack(context.state, -1)); luabind::object error_msg(luabind::from_stack(context.state, -1));
std::ostringstream error_stream; std::ostringstream error_stream;
error_stream << error_msg; error_stream << error_msg;
throw util::exception("ERROR occurred in profile script:\n" + error_stream.str()); throw util::exception("ERROR occurred in profile script:\n" + error_stream.str() +
SOURCE_REF);
} }
context.has_turn_penalty_function = util::luaFunctionExists(context.state, "turn_function"); context.has_turn_penalty_function = util::luaFunctionExists(context.state, "turn_function");
@ -360,7 +362,7 @@ std::vector<std::string> LuaScriptingEnvironment::GetNameSuffixList()
} }
catch (const luabind::error &er) catch (const luabind::error &er)
{ {
util::SimpleLogger().Write(logWARNING) << er.what(); util::Log(logWARNING) << er.what();
} }
return suffixes_vector; return suffixes_vector;
@ -406,7 +408,7 @@ int32_t LuaScriptingEnvironment::GetTurnPenalty(const double angle)
} }
catch (const luabind::error &er) catch (const luabind::error &er)
{ {
util::SimpleLogger().Write(logWARNING) << er.what(); util::Log(logWARNING) << er.what();
} }
} }
return 0; return 0;

View File

@ -6,7 +6,7 @@
#include "server/http/request.hpp" #include "server/http/request.hpp"
#include "util/json_renderer.hpp" #include "util/json_renderer.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/string_util.hpp" #include "util/string_util.hpp"
#include "util/timing_util.hpp" #include "util/timing_util.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
@ -42,7 +42,7 @@ void RequestHandler::HandleRequest(const http::request &current_request, http::r
if (!service_handler) if (!service_handler)
{ {
current_reply = http::reply::stock_reply(http::reply::internal_server_error); current_reply = http::reply::stock_reply(http::reply::internal_server_error);
util::SimpleLogger().Write(logWARNING) << "No service handler registered." << std::endl; util::Log(logWARNING) << "No service handler registered." << std::endl;
return; return;
} }
@ -52,7 +52,7 @@ void RequestHandler::HandleRequest(const http::request &current_request, http::r
TIMER_START(request_duration); TIMER_START(request_duration);
std::string request_string; std::string request_string;
util::URIDecode(current_request.uri, request_string); util::URIDecode(current_request.uri, request_string);
util::SimpleLogger().Write(logDEBUG) << "req: " << request_string; util::Log(logDEBUG) << "req: " << request_string;
auto api_iterator = request_string.begin(); auto api_iterator = request_string.begin();
auto maybe_parsed_url = api::parseURL(api_iterator, request_string.end()); auto maybe_parsed_url = api::parseURL(api_iterator, request_string.end());
@ -125,7 +125,7 @@ void RequestHandler::HandleRequest(const http::request &current_request, http::r
{ {
// deactivated as GCC apparently does not implement that, not even in 4.9 // deactivated as GCC apparently does not implement that, not even in 4.9
// std::time_t t = std::time(nullptr); // std::time_t t = std::time(nullptr);
// util::SimpleLogger().Write() << std::put_time(std::localtime(&t), "%m-%d-%Y // util::Log() << std::put_time(std::localtime(&t), "%m-%d-%Y
// %H:%M:%S") << // %H:%M:%S") <<
// " " << current_request.endpoint.to_string() << " " << // " " << current_request.endpoint.to_string() << " " <<
// current_request.referrer << ( 0 == current_request.referrer.length() ? "- " :" ") // current_request.referrer << ( 0 == current_request.referrer.length() ? "- " :" ")
@ -140,25 +140,26 @@ void RequestHandler::HandleRequest(const http::request &current_request, http::r
ltime = time(nullptr); ltime = time(nullptr);
time_stamp = localtime(&ltime); time_stamp = localtime(&ltime);
// log timestamp // log timestamp
util::SimpleLogger().Write() util::Log() << (time_stamp->tm_mday < 10 ? "0" : "") << time_stamp->tm_mday << "-"
<< (time_stamp->tm_mday < 10 ? "0" : "") << time_stamp->tm_mday << "-" << (time_stamp->tm_mon + 1 < 10 ? "0" : "") << (time_stamp->tm_mon + 1)
<< (time_stamp->tm_mon + 1 < 10 ? "0" : "") << (time_stamp->tm_mon + 1) << "-" << "-" << 1900 + time_stamp->tm_year << " "
<< 1900 + time_stamp->tm_year << " " << (time_stamp->tm_hour < 10 ? "0" : "") << (time_stamp->tm_hour < 10 ? "0" : "") << time_stamp->tm_hour << ":"
<< time_stamp->tm_hour << ":" << (time_stamp->tm_min < 10 ? "0" : "") << (time_stamp->tm_min < 10 ? "0" : "") << time_stamp->tm_min << ":"
<< time_stamp->tm_min << ":" << (time_stamp->tm_sec < 10 ? "0" : "") << (time_stamp->tm_sec < 10 ? "0" : "") << time_stamp->tm_sec << " "
<< time_stamp->tm_sec << " " << TIMER_MSEC(request_duration) << "ms " << TIMER_MSEC(request_duration) << "ms "
<< current_request.endpoint.to_string() << " " << current_request.referrer << current_request.endpoint.to_string() << " " << current_request.referrer
<< (0 == current_request.referrer.length() ? "- " : " ") << current_request.agent << (0 == current_request.referrer.length() ? "- " : " ")
<< (0 == current_request.agent.length() ? "- " : " ") << current_reply.status << current_request.agent
<< " " // << (0 == current_request.agent.length() ? "- " : " ")
<< request_string; << current_reply.status << " " //
<< request_string;
} }
} }
catch (const std::exception &e) catch (const std::exception &e)
{ {
current_reply = http::reply::stock_reply(http::reply::internal_server_error); current_reply = http::reply::stock_reply(http::reply::internal_server_error);
util::SimpleLogger().Write(logWARNING) << "[server error] code: " << e.what() util::Log(logWARNING) << "[server error] code: " << e.what()
<< ", uri: " << current_request.uri; << ", uri: " << current_request.uri;
} }
} }
} }
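The manual zero-padding in the timestamp block of HandleRequest exists because, as the comment above notes, std::put_time was not usable on the GCC versions targeted at the time. On a toolchain that does provide it (an assumption about the compiler, not a change this commit makes), the same "DD-MM-YYYY HH:MM:SS" prefix collapses to:

    #include <ctime>
    #include <iomanip>
    #include <sstream>
    #include <string>

    // Produces the same zero-padded layout as the hand-rolled version above.
    std::string formatTimestamp(std::time_t t)
    {
        std::ostringstream out;
        out << std::put_time(std::localtime(&t), "%d-%m-%Y %H:%M:%S");
        return out.str();
    }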

View File

@ -14,12 +14,13 @@
#include "engine/datafacade/datafacade_base.hpp" #include "engine/datafacade/datafacade_base.hpp"
#include "util/coordinate.hpp" #include "util/coordinate.hpp"
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/fingerprint.hpp" #include "util/fingerprint.hpp"
#include "util/io.hpp" #include "util/io.hpp"
#include "util/log.hpp"
#include "util/packed_vector.hpp" #include "util/packed_vector.hpp"
#include "util/range_table.hpp" #include "util/range_table.hpp"
#include "util/shared_memory_vector_wrapper.hpp" #include "util/shared_memory_vector_wrapper.hpp"
#include "util/simple_logger.hpp"
#include "util/static_graph.hpp" #include "util/static_graph.hpp"
#include "util/static_rtree.hpp" #include "util/static_rtree.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
@ -105,7 +106,7 @@ Storage::ReturnCode Storage::Run(int max_wait)
{ {
if (!current_regions_lock.try_lock()) if (!current_regions_lock.try_lock())
{ {
util::SimpleLogger().Write(logWARNING) << "A data update is in progress"; util::Log(logWARNING) << "A data update is in progress";
return ReturnCode::Error; return ReturnCode::Error;
} }
} }
@ -122,7 +123,7 @@ Storage::ReturnCode Storage::Run(int max_wait)
const bool lock_flags = MCL_CURRENT | MCL_FUTURE; const bool lock_flags = MCL_CURRENT | MCL_FUTURE;
if (-1 == mlockall(lock_flags)) if (-1 == mlockall(lock_flags))
{ {
util::SimpleLogger().Write(logWARNING) << "Could not request RAM lock"; util::Log(logWARNING) << "Could not request RAM lock";
} }
#endif #endif
@ -132,12 +133,12 @@ Storage::ReturnCode Storage::Run(int max_wait)
if (max_wait > 0) if (max_wait > 0)
{ {
util::SimpleLogger().Write() << "Waiting for " << max_wait util::Log() << "Waiting for " << max_wait
<< " second for all queries on the old dataset to finish:"; << " second for all queries on the old dataset to finish:";
} }
else else
{ {
util::SimpleLogger().Write() << "Waiting for all queries on the old dataset to finish:"; util::Log() << "Waiting for all queries on the old dataset to finish:";
} }
boost::interprocess::scoped_lock<boost::interprocess::named_sharable_mutex> regions_lock( boost::interprocess::scoped_lock<boost::interprocess::named_sharable_mutex> regions_lock(
@ -148,8 +149,8 @@ Storage::ReturnCode Storage::Run(int max_wait)
if (!regions_lock.timed_lock(boost::posix_time::microsec_clock::universal_time() + if (!regions_lock.timed_lock(boost::posix_time::microsec_clock::universal_time() +
boost::posix_time::seconds(max_wait))) boost::posix_time::seconds(max_wait)))
{ {
util::SimpleLogger().Write(logWARNING) << "Queries did not finish in " << max_wait util::Log(logWARNING) << "Queries did not finish in " << max_wait
<< " seconds. Claiming the lock by force."; << " seconds. Claiming the lock by force.";
// WARNING: if queries are still using the old dataset they might crash // WARNING: if queries are still using the old dataset they might crash
if (regions_layout.old_layout_region == LAYOUT_1) if (regions_layout.old_layout_region == LAYOUT_1)
{ {
@ -170,16 +171,18 @@ Storage::ReturnCode Storage::Run(int max_wait)
{ {
regions_lock.lock(); regions_lock.lock();
} }
util::SimpleLogger().Write() << "Ok."; util::Log() << "Ok.";
// since we can't change the size of a shared memory regions we delete and reallocate // since we can't change the size of a shared memory regions we delete and reallocate
if (SharedMemory::RegionExists(layout_region) && !SharedMemory::Remove(layout_region)) if (SharedMemory::RegionExists(layout_region) && !SharedMemory::Remove(layout_region))
{ {
throw util::exception("Could not remove " + regionToString(layout_region)); throw util::exception("Could not remove shared memory region " +
regionToString(layout_region) + SOURCE_REF);
} }
if (SharedMemory::RegionExists(data_region) && !SharedMemory::Remove(data_region)) if (SharedMemory::RegionExists(data_region) && !SharedMemory::Remove(data_region))
{ {
throw util::exception("Could not remove " + regionToString(data_region)); throw util::exception("Could not remove shared memory region " +
regionToString(data_region) + SOURCE_REF);
} }
// Allocate a memory layout in shared memory // Allocate a memory layout in shared memory
@ -189,8 +192,8 @@ Storage::ReturnCode Storage::Run(int max_wait)
PopulateLayout(*shared_layout_ptr); PopulateLayout(*shared_layout_ptr);
// allocate shared memory block // allocate shared memory block
util::SimpleLogger().Write() << "allocating shared memory of " util::Log() << "allocating shared memory of " << shared_layout_ptr->GetSizeOfLayout()
<< shared_layout_ptr->GetSizeOfLayout() << " bytes"; << " bytes";
auto shared_memory = makeSharedMemory(data_region, shared_layout_ptr->GetSizeOfLayout(), true); auto shared_memory = makeSharedMemory(data_region, shared_layout_ptr->GetSizeOfLayout(), true);
char *shared_memory_ptr = static_cast<char *>(shared_memory->Ptr()); char *shared_memory_ptr = static_cast<char *>(shared_memory->Ptr());
@ -207,8 +210,7 @@ Storage::ReturnCode Storage::Run(int max_wait)
if (max_wait > 0) if (max_wait > 0)
{ {
util::SimpleLogger().Write() << "Waiting for " << max_wait util::Log() << "Waiting for " << max_wait << " seconds to write new dataset timestamp";
<< " seconds to write new dataset timestamp";
auto end_time = boost::posix_time::microsec_clock::universal_time() + auto end_time = boost::posix_time::microsec_clock::universal_time() +
boost::posix_time::seconds(max_wait); boost::posix_time::seconds(max_wait);
current_regions_exclusive_lock = current_regions_exclusive_lock =
@ -217,9 +219,8 @@ Storage::ReturnCode Storage::Run(int max_wait)
if (!current_regions_exclusive_lock.owns()) if (!current_regions_exclusive_lock.owns())
{ {
util::SimpleLogger().Write(logWARNING) << "Aquiring the lock timed out after " util::Log(logWARNING) << "Aquiring the lock timed out after " << max_wait
<< max_wait << " seconds. Claiming the lock by force.";
<< " seconds. Claiming the lock by force.";
current_regions_lock.unlock(); current_regions_lock.unlock();
current_regions_lock.release(); current_regions_lock.release();
storage::SharedBarriers::resetCurrentRegions(); storage::SharedBarriers::resetCurrentRegions();
@ -228,18 +229,18 @@ Storage::ReturnCode Storage::Run(int max_wait)
} }
else else
{ {
util::SimpleLogger().Write() << "Waiting to write new dataset timestamp"; util::Log() << "Waiting to write new dataset timestamp";
current_regions_exclusive_lock = current_regions_exclusive_lock =
boost::interprocess::scoped_lock<boost::interprocess::named_upgradable_mutex>( boost::interprocess::scoped_lock<boost::interprocess::named_upgradable_mutex>(
std::move(current_regions_lock)); std::move(current_regions_lock));
} }
util::SimpleLogger().Write() << "Ok."; util::Log() << "Ok.";
data_timestamp_ptr->layout = layout_region; data_timestamp_ptr->layout = layout_region;
data_timestamp_ptr->data = data_region; data_timestamp_ptr->data = data_region;
data_timestamp_ptr->timestamp += 1; data_timestamp_ptr->timestamp += 1;
} }
util::SimpleLogger().Write() << "All data loaded."; util::Log() << "All data loaded.";
return ReturnCode::Ok; return ReturnCode::Ok;
} }
@ -260,7 +261,7 @@ void Storage::PopulateLayout(DataLayout &layout)
{ {
// collect number of elements to store in shared memory object // collect number of elements to store in shared memory object
util::SimpleLogger().Write() << "load names from: " << config.names_data_path; util::Log() << "load names from: " << config.names_data_path;
// number of entries in name index // number of entries in name index
io::FileReader name_file(config.names_data_path, io::FileReader::HasNoFingerprint); io::FileReader name_file(config.names_data_path, io::FileReader::HasNoFingerprint);

View File

@ -1,5 +1,5 @@
#include "storage/storage_config.hpp" #include "storage/storage_config.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <boost/filesystem/operations.hpp> #include <boost/filesystem/operations.hpp>
@ -43,7 +43,7 @@ bool StorageConfig::IsValid() const
{ {
if (!boost::filesystem::is_regular_file(*path)) if (!boost::filesystem::is_regular_file(*path))
{ {
util::SimpleLogger().Write(logWARNING) << "Missing/Broken File: " << path->string(); util::Log(logWARNING) << "Missing/Broken File: " << path->string();
success = false; success = false;
} }
} }

View File

@ -2,9 +2,10 @@
#include "util/coordinate_calculation.hpp" #include "util/coordinate_calculation.hpp"
#include "util/dynamic_graph.hpp" #include "util/dynamic_graph.hpp"
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/fingerprint.hpp" #include "util/fingerprint.hpp"
#include "util/graph_loader.hpp" #include "util/graph_loader.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/static_graph.hpp" #include "util/static_graph.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
@ -102,7 +103,7 @@ int main(int argc, char *argv[])
// enable logging // enable logging
if (argc < 2) if (argc < 2)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "usage:\n" << argv[0] << " <osrm>"; osrm::util::Log(logWARNING) << "usage:\n" << argv[0] << " <osrm>";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
@ -115,14 +116,12 @@ int main(int argc, char *argv[])
graph_edge_list.clear(); graph_edge_list.clear();
graph_edge_list.shrink_to_fit(); graph_edge_list.shrink_to_fit();
osrm::util::SimpleLogger().Write() << "Starting SCC graph traversal"; osrm::util::Log() << "Starting SCC graph traversal";
auto tarjan = std::make_unique<osrm::extractor::TarjanSCC<osrm::tools::TarjanGraph>>(graph); auto tarjan = std::make_unique<osrm::extractor::TarjanSCC<osrm::tools::TarjanGraph>>(graph);
tarjan->Run(); tarjan->Run();
osrm::util::SimpleLogger().Write() << "identified: " << tarjan->GetNumberOfComponents() osrm::util::Log() << "identified: " << tarjan->GetNumberOfComponents() << " many components";
<< " many components"; osrm::util::Log() << "identified " << tarjan->GetSizeOneCount() << " size 1 SCCs";
osrm::util::SimpleLogger().Write() << "identified " << tarjan->GetSizeOneCount()
<< " size 1 SCCs";
// output // output
TIMER_START(SCC_RUN_SETUP); TIMER_START(SCC_RUN_SETUP);
@ -138,13 +137,13 @@ int main(int argc, char *argv[])
auto *po_driver = OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(psz_driver_name); auto *po_driver = OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(psz_driver_name);
if (nullptr == po_driver) if (nullptr == po_driver)
{ {
throw osrm::util::exception("ESRI Shapefile driver not available"); throw osrm::util::exception("ESRI Shapefile driver not available" + SOURCE_REF);
} }
auto *po_datasource = po_driver->CreateDataSource("component.shp", nullptr); auto *po_datasource = po_driver->CreateDataSource("component.shp", nullptr);
if (nullptr == po_datasource) if (nullptr == po_datasource)
{ {
throw osrm::util::exception("Creation of output file failed"); throw osrm::util::exception("Creation of output file failed" + SOURCE_REF);
} }
auto *po_srs = new OGRSpatialReference(); auto *po_srs = new OGRSpatialReference();
@ -154,55 +153,62 @@ int main(int argc, char *argv[])
if (nullptr == po_layer) if (nullptr == po_layer)
{ {
throw osrm::util::exception("Layer creation failed."); throw osrm::util::exception("Layer creation failed." + SOURCE_REF);
} }
TIMER_STOP(SCC_RUN_SETUP); TIMER_STOP(SCC_RUN_SETUP);
osrm::util::SimpleLogger().Write() << "shapefile setup took " osrm::util::Log() << "shapefile setup took " << TIMER_MSEC(SCC_RUN_SETUP) / 1000. << "s";
<< TIMER_MSEC(SCC_RUN_SETUP) / 1000. << "s";
uint64_t total_network_length = 0;
osrm::util::Percent percentage(graph->GetNumberOfNodes());
TIMER_START(SCC_OUTPUT); TIMER_START(SCC_OUTPUT);
for (const NodeID source : osrm::util::irange(0u, graph->GetNumberOfNodes())) uint64_t total_network_length = 0;
{ {
percentage.PrintIncrement(); osrm::util::UnbufferedLog log;
for (const auto current_edge : graph->GetAdjacentEdgeRange(source)) log << "Constructing geometry ";
osrm::util::Percent percentage(log, graph->GetNumberOfNodes());
for (const NodeID source : osrm::util::irange(0u, graph->GetNumberOfNodes()))
{ {
const auto target = graph->GetTarget(current_edge); percentage.PrintIncrement();
for (const auto current_edge : graph->GetAdjacentEdgeRange(source))
if (source < target || SPECIAL_EDGEID == graph->FindEdge(target, source))
{ {
total_network_length += const auto target = graph->GetTarget(current_edge);
100 * osrm::util::coordinate_calculation::greatCircleDistance(
coordinate_list[source], coordinate_list[target]);
BOOST_ASSERT(current_edge != SPECIAL_EDGEID); if (source < target || SPECIAL_EDGEID == graph->FindEdge(target, source))
BOOST_ASSERT(source != SPECIAL_NODEID);
BOOST_ASSERT(target != SPECIAL_NODEID);
const unsigned size_of_containing_component =
std::min(tarjan->GetComponentSize(tarjan->GetComponentID(source)),
tarjan->GetComponentSize(tarjan->GetComponentID(target)));
// edges that end on bollard nodes may actually be in two distinct components
if (size_of_containing_component < 1000)
{ {
OGRLineString line_string; total_network_length +=
line_string.addPoint( 100 * osrm::util::coordinate_calculation::greatCircleDistance(
static_cast<double>(osrm::util::toFloating(coordinate_list[source].lon)), coordinate_list[source], coordinate_list[target]);
static_cast<double>(osrm::util::toFloating(coordinate_list[source].lat)));
line_string.addPoint(
static_cast<double>(osrm::util::toFloating(coordinate_list[target].lon)),
static_cast<double>(osrm::util::toFloating(coordinate_list[target].lat)));
OGRFeature *po_feature = OGRFeature::CreateFeature(po_layer->GetLayerDefn()); BOOST_ASSERT(current_edge != SPECIAL_EDGEID);
BOOST_ASSERT(source != SPECIAL_NODEID);
BOOST_ASSERT(target != SPECIAL_NODEID);
po_feature->SetGeometry(&line_string); const unsigned size_of_containing_component =
if (OGRERR_NONE != po_layer->CreateFeature(po_feature)) std::min(tarjan->GetComponentSize(tarjan->GetComponentID(source)),
tarjan->GetComponentSize(tarjan->GetComponentID(target)));
// edges that end on bollard nodes may actually be in two distinct components
if (size_of_containing_component < 1000)
{ {
throw osrm::util::exception("Failed to create feature in shapefile."); OGRLineString line_string;
line_string.addPoint(static_cast<double>(osrm::util::toFloating(
coordinate_list[source].lon)),
static_cast<double>(osrm::util::toFloating(
coordinate_list[source].lat)));
line_string.addPoint(static_cast<double>(osrm::util::toFloating(
coordinate_list[target].lon)),
static_cast<double>(osrm::util::toFloating(
coordinate_list[target].lat)));
OGRFeature *po_feature =
OGRFeature::CreateFeature(po_layer->GetLayerDefn());
po_feature->SetGeometry(&line_string);
if (OGRERR_NONE != po_layer->CreateFeature(po_feature))
{
throw osrm::util::exception("Failed to create feature in shapefile." +
SOURCE_REF);
}
OGRFeature::DestroyFeature(po_feature);
} }
OGRFeature::DestroyFeature(po_feature);
} }
} }
} }
@ -210,13 +216,11 @@ int main(int argc, char *argv[])
OGRSpatialReference::DestroySpatialReference(po_srs); OGRSpatialReference::DestroySpatialReference(po_srs);
OGRDataSource::DestroyDataSource(po_datasource); OGRDataSource::DestroyDataSource(po_datasource);
TIMER_STOP(SCC_OUTPUT); TIMER_STOP(SCC_OUTPUT);
osrm::util::SimpleLogger().Write() osrm::util::Log() << "generating output took: " << TIMER_MSEC(SCC_OUTPUT) / 1000. << "s";
<< "generating output took: " << TIMER_MSEC(SCC_OUTPUT) / 1000. << "s";
osrm::util::SimpleLogger().Write() osrm::util::Log() << "total network distance: "
<< "total network distance: " << static_cast<uint64_t>(total_network_length / 100 / 1000.) << static_cast<uint64_t>(total_network_length / 100 / 1000.) << " km";
<< " km";
osrm::util::SimpleLogger().Write() << "finished component analysis"; osrm::util::Log() << "finished component analysis";
return EXIT_SUCCESS; return EXIT_SUCCESS;
} }

View File

@ -1,6 +1,7 @@
#include "contractor/contractor.hpp" #include "contractor/contractor.hpp"
#include "contractor/contractor_config.hpp" #include "contractor/contractor_config.hpp"
#include "util/simple_logger.hpp" #include "util/exception.hpp"
#include "util/log.hpp"
#include "util/version.hpp" #include "util/version.hpp"
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
@ -92,19 +93,19 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig
} }
catch (const boost::program_options::error &e) catch (const boost::program_options::error &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); util::Log(logERROR) << e.what();
return return_code::fail; return return_code::fail;
} }
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
util::SimpleLogger().Write() << OSRM_VERSION; std::cout << OSRM_VERSION << std::endl;
return return_code::exit; return return_code::exit;
} }
if (option_variables.count("help")) if (option_variables.count("help"))
{ {
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return return_code::exit; return return_code::exit;
} }
@ -112,7 +113,7 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig
if (!option_variables.count("input")) if (!option_variables.count("input"))
{ {
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return return_code::fail; return return_code::fail;
} }
@ -121,6 +122,7 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig
int main(int argc, char *argv[]) try int main(int argc, char *argv[]) try
{ {
util::LogPolicy::GetInstance().Unmute(); util::LogPolicy::GetInstance().Unmute();
contractor::ContractorConfig contractor_config; contractor::ContractorConfig contractor_config;
@ -140,7 +142,7 @@ int main(int argc, char *argv[]) try
if (1 > contractor_config.requested_num_threads) if (1 > contractor_config.requested_num_threads)
{ {
util::SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger"; util::Log(logERROR) << "Number of threads must be 1 or larger";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
@ -148,21 +150,19 @@ int main(int argc, char *argv[]) try
if (recommended_num_threads != contractor_config.requested_num_threads) if (recommended_num_threads != contractor_config.requested_num_threads)
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << "The recommended number of threads is " << recommended_num_threads
<< "The recommended number of threads is " << recommended_num_threads << "! This setting may have performance side-effects.";
<< "! This setting may have performance side-effects.";
} }
if (!boost::filesystem::is_regular_file(contractor_config.osrm_input_path)) if (!boost::filesystem::is_regular_file(contractor_config.osrm_input_path))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Input file " << contractor_config.osrm_input_path.string()
<< "Input file " << contractor_config.osrm_input_path.string() << " not found!"; << " not found!";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
util::SimpleLogger().Write() << "Input file: " util::Log() << "Input file: " << contractor_config.osrm_input_path.filename().string();
<< contractor_config.osrm_input_path.filename().string(); util::Log() << "Threads: " << contractor_config.requested_num_threads;
util::SimpleLogger().Write() << "Threads: " << contractor_config.requested_num_threads;
tbb::task_scheduler_init init(contractor_config.requested_num_threads); tbb::task_scheduler_init init(contractor_config.requested_num_threads);
@ -170,8 +170,7 @@ int main(int argc, char *argv[]) try
} }
catch (const std::bad_alloc &e) catch (const std::bad_alloc &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); util::Log(logERROR) << "[exception] " << e.what();
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Please provide more memory or consider using a larger swapfile";
<< "Please provide more memory or consider using a larger swapfile";
return EXIT_FAILURE; return EXIT_FAILURE;
} }

View File

@ -1,7 +1,7 @@
#include "extractor/extractor.hpp" #include "extractor/extractor.hpp"
#include "extractor/extractor_config.hpp" #include "extractor/extractor_config.hpp"
#include "extractor/scripting_environment_lua.hpp" #include "extractor/scripting_environment_lua.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/version.hpp" #include "util/version.hpp"
#include <tbb/task_scheduler_init.h> #include <tbb/task_scheduler_init.h>
@ -84,19 +84,19 @@ return_code parseArguments(int argc, char *argv[], extractor::ExtractorConfig &e
} }
catch (const boost::program_options::error &e) catch (const boost::program_options::error &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); util::Log(logERROR) << e.what();
return return_code::fail; return return_code::fail;
} }
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
util::SimpleLogger().Write() << OSRM_VERSION; std::cout << OSRM_VERSION << std::endl;
return return_code::exit; return return_code::exit;
} }
if (option_variables.count("help")) if (option_variables.count("help"))
{ {
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return return_code::exit; return return_code::exit;
} }
@ -104,7 +104,7 @@ return_code parseArguments(int argc, char *argv[], extractor::ExtractorConfig &e
if (!option_variables.count("input")) if (!option_variables.count("input"))
{ {
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return return_code::exit; return return_code::exit;
} }
@ -132,21 +132,21 @@ int main(int argc, char *argv[]) try
if (1 > extractor_config.requested_num_threads) if (1 > extractor_config.requested_num_threads)
{ {
util::SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger"; util::Log(logERROR) << "Number of threads must be 1 or larger";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
if (!boost::filesystem::is_regular_file(extractor_config.input_path)) if (!boost::filesystem::is_regular_file(extractor_config.input_path))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Input file " << extractor_config.input_path.string()
<< "Input file " << extractor_config.input_path.string() << " not found!"; << " not found!";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
if (!boost::filesystem::is_regular_file(extractor_config.profile_path)) if (!boost::filesystem::is_regular_file(extractor_config.profile_path))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Profile " << extractor_config.profile_path.string()
<< "Profile " << extractor_config.profile_path.string() << " not found!"; << " not found!";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
@ -157,8 +157,7 @@ int main(int argc, char *argv[]) try
} }
catch (const std::bad_alloc &e) catch (const std::bad_alloc &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); util::Log(logERROR) << "[exception] " << e.what();
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Please provide more memory or consider using a larger swapfile";
<< "Please provide more memory or consider using a larger swapfile";
return EXIT_FAILURE; return EXIT_FAILURE;
} }

View File

@ -1,5 +1,6 @@
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/simple_logger.hpp" #include "util/exception_utils.hpp"
#include "util/log.hpp"
#include "util/timing_util.hpp" #include "util/timing_util.hpp"
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
@ -53,24 +54,24 @@ int main(int argc, char *argv[])
{ {
#ifdef __FreeBSD__ #ifdef __FreeBSD__
osrm::util::SimpleLogger().Write() << "Not supported on FreeBSD"; osrm::util::Log() << "Not supported on FreeBSD";
return 0; return 0;
#endif #endif
#ifdef _WIN32 #ifdef _WIN32
osrm::util::SimpleLogger().Write() << "Not supported on Windows"; osrm::util::Log() << "Not supported on Windows";
return 0; return 0;
#else #else
osrm::util::LogPolicy::GetInstance().Unmute(); osrm::util::LogPolicy::GetInstance().Unmute();
if (1 == argc) if (1 == argc)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "usage: " << argv[0] << " /path/on/device"; osrm::util::Log(logWARNING) << "usage: " << argv[0] << " /path/on/device";
return -1; return -1;
} }
test_path = boost::filesystem::path(argv[1]); test_path = boost::filesystem::path(argv[1]);
test_path /= "osrm.tst"; test_path /= "osrm.tst";
osrm::util::SimpleLogger().Write(logDEBUG) << "temporary file: " << test_path.string(); osrm::util::Log(logDEBUG) << "temporary file: " << test_path.string();
// create files for testing // create files for testing
if (2 == argc) if (2 == argc)
@ -78,7 +79,8 @@ int main(int argc, char *argv[])
// create file to test // create file to test
if (boost::filesystem::exists(test_path)) if (boost::filesystem::exists(test_path))
{ {
throw osrm::util::exception("Data file already exists"); throw osrm::util::exception("Data file already exists: " + test_path.string() +
SOURCE_REF);
} }
int *random_array = new int[osrm::tools::NUMBER_OF_ELEMENTS]; int *random_array = new int[osrm::tools::NUMBER_OF_ELEMENTS];
@ -97,34 +99,33 @@ int main(int argc, char *argv[])
open(test_path.string().c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_SYNC, S_IRWXU); open(test_path.string().c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_SYNC, S_IRWXU);
if (-1 == file_desc) if (-1 == file_desc)
{ {
throw osrm::util::exception("Could not open random data file"); throw osrm::util::exception("Could not open random data file" + test_path.string() +
SOURCE_REF);
} }
TIMER_START(write_1gb); TIMER_START(write_1gb);
int ret = int ret =
write(file_desc, random_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); write(file_desc, random_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned));
if (0 > ret) if (0 > ret)
{ {
throw osrm::util::exception("could not write random data file"); throw osrm::util::exception("could not write random data file" + test_path.string() +
SOURCE_REF);
} }
TIMER_STOP(write_1gb); TIMER_STOP(write_1gb);
close(file_desc); close(file_desc);
#endif #endif
delete[] random_array; delete[] random_array;
osrm::util::SimpleLogger().Write(logDEBUG) << "writing raw 1GB took " osrm::util::Log(logDEBUG) << "writing raw 1GB took " << TIMER_SEC(write_1gb) << "s";
<< TIMER_SEC(write_1gb) << "s"; osrm::util::Log() << "raw write performance: " << std::setprecision(5) << std::fixed
osrm::util::SimpleLogger().Write() << "raw write performance: " << std::setprecision(5) << 1024 * 1024 / TIMER_SEC(write_1gb) << "MB/sec";
<< std::fixed << 1024 * 1024 / TIMER_SEC(write_1gb)
<< "MB/sec";
osrm::util::SimpleLogger().Write(logDEBUG) osrm::util::Log(logDEBUG) << "finished creation of random data. Flush disk cache now!";
<< "finished creation of random data. Flush disk cache now!";
} }
else else
{ {
// Run Non-Cached I/O benchmarks // Run Non-Cached I/O benchmarks
if (!boost::filesystem::exists(test_path)) if (!boost::filesystem::exists(test_path))
{ {
throw osrm::util::exception("data file does not exist"); throw osrm::util::exception("data file does not exist" + SOURCE_REF);
} }
// volatiles do not get optimized // volatiles do not get optimized
@ -143,7 +144,7 @@ int main(int argc, char *argv[])
int file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | O_SYNC); int file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | O_SYNC);
if (-1 == file_desc) if (-1 == file_desc)
{ {
osrm::util::SimpleLogger().Write(logDEBUG) << "opened, error: " << strerror(errno); osrm::util::Log(logDEBUG) << "opened, error: " << strerror(errno);
return -1; return -1;
} }
char *raw_array = (char *)memalign(512, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); char *raw_array = (char *)memalign(512, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned));
@ -156,22 +157,19 @@ int main(int argc, char *argv[])
#endif #endif
#ifdef __linux__ #ifdef __linux__
int ret = read(file_desc, raw_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); int ret = read(file_desc, raw_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned));
osrm::util::SimpleLogger().Write(logDEBUG) << "read " << ret osrm::util::Log(logDEBUG) << "read " << ret << " bytes, error: " << strerror(errno);
<< " bytes, error: " << strerror(errno);
close(file_desc); close(file_desc);
file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | O_SYNC); file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | O_SYNC);
osrm::util::SimpleLogger().Write(logDEBUG) << "opened, error: " << strerror(errno); osrm::util::Log(logDEBUG) << "opened, error: " << strerror(errno);
#endif #endif
TIMER_STOP(read_1gb); TIMER_STOP(read_1gb);
osrm::util::SimpleLogger().Write(logDEBUG) << "reading raw 1GB took " << TIMER_SEC(read_1gb) osrm::util::Log(logDEBUG) << "reading raw 1GB took " << TIMER_SEC(read_1gb) << "s";
<< "s"; osrm::util::Log() << "raw read performance: " << std::setprecision(5) << std::fixed
osrm::util::SimpleLogger().Write() << "raw read performance: " << std::setprecision(5) << 1024 * 1024 / TIMER_SEC(read_1gb) << "MB/sec";
<< std::fixed << 1024 * 1024 / TIMER_SEC(read_1gb)
<< "MB/sec";
std::vector<double> timing_results_raw_random; std::vector<double> timing_results_raw_random;
osrm::util::SimpleLogger().Write(logDEBUG) << "running 1000 random I/Os of 4KB"; osrm::util::Log(logDEBUG) << "running 1000 random I/Os of 4KB";
#ifdef __APPLE__ #ifdef __APPLE__
fseek(fd, 0, SEEK_SET); fseek(fd, 0, SEEK_SET);
@ -206,21 +204,21 @@ int main(int argc, char *argv[])
TIMER_STOP(random_access); TIMER_STOP(random_access);
if (((off_t)-1) == ret1) if (((off_t)-1) == ret1)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; osrm::util::Log(logWARNING) << "offset: " << current_offset;
osrm::util::SimpleLogger().Write(logWARNING) << "seek error " << strerror(errno); osrm::util::Log(logWARNING) << "seek error " << strerror(errno);
throw osrm::util::exception("seek error"); throw osrm::util::exception("seek error" + SOURCE_REF);
} }
if (-1 == ret2) if (-1 == ret2)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; osrm::util::Log(logWARNING) << "offset: " << current_offset;
osrm::util::SimpleLogger().Write(logWARNING) << "read error " << strerror(errno); osrm::util::Log(logWARNING) << "read error " << strerror(errno);
throw osrm::util::exception("read error"); throw osrm::util::exception("read error" + SOURCE_REF);
} }
timing_results_raw_random.push_back(TIMER_SEC(random_access)); timing_results_raw_random.push_back(TIMER_SEC(random_access));
} }
// Do statistics // Do statistics
osrm::util::SimpleLogger().Write(logDEBUG) << "running raw random I/O statistics"; osrm::util::Log(logDEBUG) << "running raw random I/O statistics";
std::ofstream random_csv("random.csv", std::ios::trunc); std::ofstream random_csv("random.csv", std::ios::trunc);
for (unsigned i = 0; i < timing_results_raw_random.size(); ++i) for (unsigned i = 0; i < timing_results_raw_random.size(); ++i)
{ {
@ -228,12 +226,12 @@ int main(int argc, char *argv[])
} }
osrm::tools::runStatistics(timing_results_raw_random, stats); osrm::tools::runStatistics(timing_results_raw_random, stats);
osrm::util::SimpleLogger().Write() << "raw random I/O: " << std::setprecision(5) osrm::util::Log() << "raw random I/O: " << std::setprecision(5) << std::fixed
<< std::fixed << "min: " << stats.min << "ms, " << "min: " << stats.min << "ms, "
<< "mean: " << stats.mean << "ms, " << "mean: " << stats.mean << "ms, "
<< "med: " << stats.med << "ms, " << "med: " << stats.med << "ms, "
<< "max: " << stats.max << "ms, " << "max: " << stats.max << "ms, "
<< "dev: " << stats.dev << "ms"; << "dev: " << stats.dev << "ms";
std::vector<double> timing_results_raw_seq; std::vector<double> timing_results_raw_seq;
#ifdef __APPLE__ #ifdef __APPLE__
@ -266,15 +264,15 @@ int main(int argc, char *argv[])
TIMER_STOP(read_every_100); TIMER_STOP(read_every_100);
if (((off_t)-1) == ret1) if (((off_t)-1) == ret1)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; osrm::util::Log(logWARNING) << "offset: " << current_offset;
osrm::util::SimpleLogger().Write(logWARNING) << "seek error " << strerror(errno); osrm::util::Log(logWARNING) << "seek error " << strerror(errno);
throw osrm::util::exception("seek error"); throw osrm::util::exception("seek error" + SOURCE_REF);
} }
if (-1 == ret2) if (-1 == ret2)
{ {
osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; osrm::util::Log(logWARNING) << "offset: " << current_offset;
osrm::util::SimpleLogger().Write(logWARNING) << "read error " << strerror(errno); osrm::util::Log(logWARNING) << "read error " << strerror(errno);
throw osrm::util::exception("read error"); throw osrm::util::exception("read error" + SOURCE_REF);
} }
timing_results_raw_seq.push_back(TIMER_SEC(read_every_100)); timing_results_raw_seq.push_back(TIMER_SEC(read_every_100));
} }
@ -288,7 +286,7 @@ int main(int argc, char *argv[])
close(file_desc); close(file_desc);
#endif #endif
// Do statistics // Do statistics
osrm::util::SimpleLogger().Write(logDEBUG) << "running sequential I/O statistics"; osrm::util::Log(logDEBUG) << "running sequential I/O statistics";
// print simple statistics: min, max, median, variance // print simple statistics: min, max, median, variance
std::ofstream seq_csv("sequential.csv", std::ios::trunc); std::ofstream seq_csv("sequential.csv", std::ios::trunc);
for (unsigned i = 0; i < timing_results_raw_seq.size(); ++i) for (unsigned i = 0; i < timing_results_raw_seq.size(); ++i)
@ -296,17 +294,17 @@ int main(int argc, char *argv[])
seq_csv << i << ", " << timing_results_raw_seq[i] << std::endl; seq_csv << i << ", " << timing_results_raw_seq[i] << std::endl;
} }
osrm::tools::runStatistics(timing_results_raw_seq, stats); osrm::tools::runStatistics(timing_results_raw_seq, stats);
osrm::util::SimpleLogger().Write() << "raw sequential I/O: " << std::setprecision(5) osrm::util::Log() << "raw sequential I/O: " << std::setprecision(5) << std::fixed
<< std::fixed << "min: " << stats.min << "ms, " << "min: " << stats.min << "ms, "
<< "mean: " << stats.mean << "ms, " << "mean: " << stats.mean << "ms, "
<< "med: " << stats.med << "ms, " << "med: " << stats.med << "ms, "
<< "max: " << stats.max << "ms, " << "max: " << stats.max << "ms, "
<< "dev: " << stats.dev << "ms"; << "dev: " << stats.dev << "ms";
if (boost::filesystem::exists(test_path)) if (boost::filesystem::exists(test_path))
{ {
boost::filesystem::remove(test_path); boost::filesystem::remove(test_path);
osrm::util::SimpleLogger().Write(logDEBUG) << "removing temporary files"; osrm::util::Log(logDEBUG) << "removing temporary files";
} }
} }
return EXIT_SUCCESS; return EXIT_SUCCESS;
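The timing loops above hand each series of samples to osrm::tools::runStatistics and then print min/mean/med/max/dev. The helper itself is not part of this hunk; as a rough sketch only (assuming it performs the usual summary aggregation — the real function fills a stats struct passed by reference rather than returning one), it could look like this:

#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

struct Statistics { double min, max, mean, med, dev; };

// Assumed aggregation: sort a copy of the samples and derive the summary values.
Statistics summarize(std::vector<double> samples)
{
    Statistics s{};
    if (samples.empty())
        return s;
    std::sort(samples.begin(), samples.end());
    s.min = samples.front();
    s.max = samples.back();
    s.mean = std::accumulate(samples.begin(), samples.end(), 0.0) / samples.size();
    s.med = samples[samples.size() / 2];
    double squared_error = 0.0;
    for (const double v : samples)
        squared_error += (v - s.mean) * (v - s.mean);
    s.dev = std::sqrt(squared_error / samples.size());
    return s;
}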

View File

@ -1,5 +1,5 @@
#include "server/server.hpp" #include "server/server.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/version.hpp" #include "util/version.hpp"
#include "osrm/engine_config.hpp" #include "osrm/engine_config.hpp"
@ -136,19 +136,19 @@ inline unsigned generateServerProgramOptions(const int argc,
} }
catch (const boost::program_options::error &e) catch (const boost::program_options::error &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); util::Log(logERROR) << e.what();
return INIT_FAILED; return INIT_FAILED;
} }
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
util::SimpleLogger().Write() << OSRM_VERSION; std::cout << OSRM_VERSION << std::endl;
return INIT_OK_DO_NOT_START_ENGINE; return INIT_OK_DO_NOT_START_ENGINE;
} }
if (option_variables.count("help")) if (option_variables.count("help"))
{ {
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return INIT_OK_DO_NOT_START_ENGINE; return INIT_OK_DO_NOT_START_ENGINE;
} }
@ -164,11 +164,10 @@ inline unsigned generateServerProgramOptions(const int argc,
} }
else if (use_shared_memory && option_variables.count("base")) else if (use_shared_memory && option_variables.count("base"))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << "Shared memory settings conflict with path settings.";
<< "Shared memory settings conflict with path settings.";
} }
util::SimpleLogger().Write() << visible_options; std::cout << visible_options;
return INIT_OK_DO_NOT_START_ENGINE; return INIT_OK_DO_NOT_START_ENGINE;
} }
@ -211,69 +210,59 @@ int main(int argc, const char *argv[]) try
{ {
if (base_path.empty() != config.use_shared_memory) if (base_path.empty() != config.use_shared_memory)
{ {
util::SimpleLogger().Write(logWARNING) << "Path settings and shared memory conflicts."; util::Log(logWARNING) << "Path settings and shared memory conflicts.";
} }
else else
{ {
if (!boost::filesystem::is_regular_file(config.storage_config.ram_index_path)) if (!boost::filesystem::is_regular_file(config.storage_config.ram_index_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.ram_index_path util::Log(logWARNING) << config.storage_config.ram_index_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.file_index_path)) if (!boost::filesystem::is_regular_file(config.storage_config.file_index_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.file_index_path util::Log(logWARNING) << config.storage_config.file_index_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.hsgr_data_path)) if (!boost::filesystem::is_regular_file(config.storage_config.hsgr_data_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.hsgr_data_path util::Log(logWARNING) << config.storage_config.hsgr_data_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.nodes_data_path)) if (!boost::filesystem::is_regular_file(config.storage_config.nodes_data_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.nodes_data_path util::Log(logWARNING) << config.storage_config.nodes_data_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.edges_data_path)) if (!boost::filesystem::is_regular_file(config.storage_config.edges_data_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.edges_data_path util::Log(logWARNING) << config.storage_config.edges_data_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.core_data_path)) if (!boost::filesystem::is_regular_file(config.storage_config.core_data_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.core_data_path util::Log(logWARNING) << config.storage_config.core_data_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.geometries_path)) if (!boost::filesystem::is_regular_file(config.storage_config.geometries_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.geometries_path util::Log(logWARNING) << config.storage_config.geometries_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.timestamp_path)) if (!boost::filesystem::is_regular_file(config.storage_config.timestamp_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.timestamp_path util::Log(logWARNING) << config.storage_config.timestamp_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.datasource_names_path)) if (!boost::filesystem::is_regular_file(config.storage_config.datasource_names_path))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << config.storage_config.datasource_names_path
<< config.storage_config.datasource_names_path << " is not found"; << " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.datasource_indexes_path)) if (!boost::filesystem::is_regular_file(config.storage_config.datasource_indexes_path))
{ {
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << config.storage_config.datasource_indexes_path
<< config.storage_config.datasource_indexes_path << " is not found"; << " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.names_data_path)) if (!boost::filesystem::is_regular_file(config.storage_config.names_data_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.names_data_path util::Log(logWARNING) << config.storage_config.names_data_path << " is not found";
<< " is not found";
} }
if (!boost::filesystem::is_regular_file(config.storage_config.properties_path)) if (!boost::filesystem::is_regular_file(config.storage_config.properties_path))
{ {
util::SimpleLogger().Write(logWARNING) << config.storage_config.properties_path util::Log(logWARNING) << config.storage_config.properties_path << " is not found";
<< " is not found";
} }
} }
return EXIT_FAILURE; return EXIT_FAILURE;
@ -287,7 +276,7 @@ int main(int argc, const char *argv[]) try
if (should_lock && -1 == mlockall(MCL_CURRENT | MCL_FUTURE)) if (should_lock && -1 == mlockall(MCL_CURRENT | MCL_FUTURE))
{ {
could_lock = false; could_lock = false;
util::SimpleLogger().Write(logWARNING) << "memory could not be locked to RAM"; util::Log(logWARNING) << "memory could not be locked to RAM";
} }
} }
~MemoryLocker() ~MemoryLocker()
@ -298,16 +287,16 @@ int main(int argc, const char *argv[]) try
bool should_lock = false, could_lock = true; bool should_lock = false, could_lock = true;
} memory_locker(config.use_shared_memory); } memory_locker(config.use_shared_memory);
#endif #endif
util::SimpleLogger().Write() << "starting up engines, " << OSRM_VERSION; util::Log() << "starting up engines, " << OSRM_VERSION;
if (config.use_shared_memory) if (config.use_shared_memory)
{ {
util::SimpleLogger().Write() << "Loading from shared memory"; util::Log() << "Loading from shared memory";
} }
util::SimpleLogger().Write() << "Threads: " << requested_thread_num; util::Log() << "Threads: " << requested_thread_num;
util::SimpleLogger().Write() << "IP address: " << ip_address; util::Log() << "IP address: " << ip_address;
util::SimpleLogger().Write() << "IP port: " << ip_port; util::Log() << "IP port: " << ip_port;
#ifndef _WIN32 #ifndef _WIN32
int sig = 0; int sig = 0;
@ -324,7 +313,7 @@ int main(int argc, const char *argv[]) try
if (trial_run) if (trial_run)
{ {
util::SimpleLogger().Write() << "trial run, quitting after successful initialization"; util::Log() << "trial run, quitting after successful initialization";
} }
else else
{ {
@ -343,7 +332,7 @@ int main(int argc, const char *argv[]) try
sigaddset(&wait_mask, SIGQUIT); sigaddset(&wait_mask, SIGQUIT);
sigaddset(&wait_mask, SIGTERM); sigaddset(&wait_mask, SIGTERM);
pthread_sigmask(SIG_BLOCK, &wait_mask, nullptr); pthread_sigmask(SIG_BLOCK, &wait_mask, nullptr);
util::SimpleLogger().Write() << "running and waiting for requests"; util::Log() << "running and waiting for requests";
if (std::getenv("SIGNAL_PARENT_WHEN_READY")) if (std::getenv("SIGNAL_PARENT_WHEN_READY"))
{ {
kill(getppid(), SIGUSR1); kill(getppid(), SIGUSR1);
@ -353,12 +342,12 @@ int main(int argc, const char *argv[]) try
// Set console control handler to allow server to be stopped. // Set console control handler to allow server to be stopped.
console_ctrl_function = std::bind(&server::Server::Stop, routing_server); console_ctrl_function = std::bind(&server::Server::Stop, routing_server);
SetConsoleCtrlHandler(console_ctrl_handler, TRUE); SetConsoleCtrlHandler(console_ctrl_handler, TRUE);
util::SimpleLogger().Write() << "running and waiting for requests"; util::Log() << "running and waiting for requests";
routing_server->Run(); routing_server->Run();
#endif #endif
util::SimpleLogger().Write() << "initiating shutdown"; util::Log() << "initiating shutdown";
routing_server->Stop(); routing_server->Stop();
util::SimpleLogger().Write() << "stopping threads"; util::Log() << "stopping threads";
auto status = future.wait_for(std::chrono::seconds(2)); auto status = future.wait_for(std::chrono::seconds(2));
@ -368,19 +357,18 @@ int main(int argc, const char *argv[]) try
} }
else else
{ {
util::SimpleLogger().Write(logWARNING) << "Didn't exit within 2 seconds. Hard abort!"; util::Log(logWARNING) << "Didn't exit within 2 seconds. Hard abort!";
server_task.reset(); // just kill it server_task.reset(); // just kill it
} }
} }
util::SimpleLogger().Write() << "freeing objects"; util::Log() << "freeing objects";
routing_server.reset(); routing_server.reset();
util::SimpleLogger().Write() << "shutdown completed"; util::Log() << "shutdown completed";
} }
catch (const std::bad_alloc &e) catch (const std::bad_alloc &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); util::Log(logWARNING) << "[exception] " << e.what();
util::SimpleLogger().Write(logWARNING) util::Log(logWARNING) << "Please provide more memory or consider using a larger swapfile";
<< "Please provide more memory or consider using a larger swapfile";
return EXIT_FAILURE; return EXIT_FAILURE;
} }

View File

@ -2,7 +2,7 @@
#include "storage/shared_datatype.hpp" #include "storage/shared_datatype.hpp"
#include "storage/shared_memory.hpp" #include "storage/shared_memory.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
namespace osrm namespace osrm
{ {
@ -36,14 +36,14 @@ void deleteRegion(const SharedDataType region)
} }
}(); }();
util::SimpleLogger().Write(logWARNING) << "could not delete shared memory region " << name; util::Log(logWARNING) << "could not delete shared memory region " << name;
} }
} }
// find all existing shmem regions and remove them. // find all existing shmem regions and remove them.
void springclean() void springclean()
{ {
util::SimpleLogger().Write() << "spring-cleaning all shared memory regions"; util::Log() << "spring-cleaning all shared memory regions";
deleteRegion(DATA_1); deleteRegion(DATA_1);
deleteRegion(LAYOUT_1); deleteRegion(LAYOUT_1);
deleteRegion(DATA_2); deleteRegion(DATA_2);
@ -56,19 +56,18 @@ void springclean()
int main() int main()
{ {
osrm::util::LogPolicy::GetInstance().Unmute(); osrm::util::LogPolicy::GetInstance().Unmute();
osrm::util::SimpleLogger().Write() << "Releasing all locks"; osrm::util::Log() << "Releasing all locks";
osrm::util::SimpleLogger().Write() << "ATTENTION! BE CAREFUL!"; osrm::util::Log() << "ATTENTION! BE CAREFUL!";
osrm::util::SimpleLogger().Write() << "----------------------"; osrm::util::Log() << "----------------------";
osrm::util::SimpleLogger().Write() << "This tool may put osrm-routed into an undefined state!"; osrm::util::Log() << "This tool may put osrm-routed into an undefined state!";
osrm::util::SimpleLogger().Write() osrm::util::Log() << "Type 'Y' to acknowledge that you know what you are doing.";
<< "Type 'Y' to acknowledge that you know what you are doing."; osrm::util::Log() << "\n\nDo you want to purge all shared memory allocated "
osrm::util::SimpleLogger().Write() << "\n\nDo you want to purge all shared memory allocated " << "by osrm-datastore? [type 'Y' to confirm]";
<< "by osrm-datastore? [type 'Y' to confirm]";
const auto letter = getchar(); const auto letter = getchar();
if (letter != 'Y') if (letter != 'Y')
{ {
osrm::util::SimpleLogger().Write() << "aborted."; osrm::util::Log() << "aborted.";
return EXIT_SUCCESS; return EXIT_SUCCESS;
} }
osrm::tools::springclean(); osrm::tools::springclean();

View File

@ -1,6 +1,6 @@
#include "storage/storage.hpp" #include "storage/storage.hpp"
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include "util/typedefs.hpp" #include "util/typedefs.hpp"
#include "util/version.hpp" #include "util/version.hpp"
@ -49,7 +49,7 @@ bool generateDataStoreOptions(const int argc,
// print help options if no infile is specified // print help options if no infile is specified
if (argc < 2) if (argc < 2)
{ {
util::SimpleLogger().Write() << visible_options; util::Log() << visible_options;
return false; return false;
} }
@ -66,19 +66,19 @@ bool generateDataStoreOptions(const int argc,
} }
catch (const boost::program_options::error &e) catch (const boost::program_options::error &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); util::Log(logERROR) << e.what();
return false; return false;
} }
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
util::SimpleLogger().Write() << OSRM_VERSION; util::Log() << OSRM_VERSION;
return false; return false;
} }
if (option_variables.count("help")) if (option_variables.count("help"))
{ {
util::SimpleLogger().Write() << visible_options; util::Log() << visible_options;
return false; return false;
} }
@ -100,7 +100,7 @@ int main(const int argc, const char *argv[]) try
storage::StorageConfig config(base_path); storage::StorageConfig config(base_path);
if (!config.IsValid()) if (!config.IsValid())
{ {
util::SimpleLogger().Write(logWARNING) << "Config contains invalid file paths. Exiting!"; util::Log(logERROR) << "Config contains invalid file paths. Exiting!";
return EXIT_FAILURE; return EXIT_FAILURE;
} }
storage::Storage storage(std::move(config)); storage::Storage storage(std::move(config));
@ -115,8 +115,8 @@ int main(const int argc, const char *argv[]) try
{ {
if (retry_counter > 0) if (retry_counter > 0)
{ {
util::SimpleLogger().Write(logWARNING) << "Try number " << (retry_counter + 1) util::Log(logWARNING) << "Try number " << (retry_counter + 1)
<< " to load the dataset."; << " to load the dataset.";
} }
code = storage.Run(max_wait); code = storage.Run(max_wait);
retry_counter++; retry_counter++;
@ -131,9 +131,8 @@ int main(const int argc, const char *argv[]) try
} }
catch (const std::bad_alloc &e) catch (const std::bad_alloc &e)
{ {
util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); util::Log(logERROR) << "[exception] " << e.what();
util::SimpleLogger().Write(logWARNING) util::Log(logERROR) << "Please provide more memory or disable locking the virtual "
<< "Please provide more memory or disable locking the virtual " "address space (note: this makes OSRM swap, i.e. slow)";
"address space (note: this makes OSRM swap, i.e. slow)";
return EXIT_FAILURE; return EXIT_FAILURE;
} }

View File

@ -1,12 +1,12 @@
#include "storage/shared_barriers.hpp" #include "storage/shared_barriers.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <iostream> #include <iostream>
int main() int main()
{ {
osrm::util::LogPolicy::GetInstance().Unmute(); osrm::util::LogPolicy::GetInstance().Unmute();
osrm::util::SimpleLogger().Write() << "Releasing all locks"; osrm::util::Log() << "Releasing all locks";
osrm::storage::SharedBarriers::resetCurrentRegions(); osrm::storage::SharedBarriers::resetCurrentRegions();
osrm::storage::SharedBarriers::resetRegions1(); osrm::storage::SharedBarriers::resetRegions1();

View File

@ -1,7 +1,7 @@
#include "util/coordinate_calculation.hpp" #include "util/coordinate_calculation.hpp"
#ifndef NDEBUG #ifndef NDEBUG
#include "util/simple_logger.hpp" #include "util/log.hpp"
#endif #endif
#include "osrm/coordinate.hpp" #include "osrm/coordinate.hpp"

src/util/log.cpp (new file, 119 lines)
View File

@ -0,0 +1,119 @@
#include "util/log.hpp"
#include "util/isatty.hpp"
#include <cstdio>
#include <iostream>
#include <mutex>
#include <string>
namespace osrm
{
namespace util
{
namespace
{
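// ANSI color escape sequences; they are only emitted when IsStdoutATTY() reports that output goes to a terminal.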
static const char COL_RESET[]{"\x1b[0m"};
static const char RED[]{"\x1b[31m"};
static const char YELLOW[]{"\x1b[33m"};
#ifndef NDEBUG
static const char MAGENTA[]{"\x1b[35m"};
#endif
// static const char GREEN[] { "\x1b[32m"};
// static const char BLUE[] { "\x1b[34m"};
// static const char CYAN[] { "\x1b[36m"};
}
void LogPolicy::Unmute() { m_is_mute = false; }
void LogPolicy::Mute() { m_is_mute = true; }
bool LogPolicy::IsMute() const { return m_is_mute; }
LogPolicy &LogPolicy::GetInstance()
{
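// Meyers singleton: the function-local static is initialized once and is thread-safe since C++11.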
static LogPolicy runningInstance;
return runningInstance;
}
Log::Log(LogLevel level_, std::ostream &ostream) : level(level_), stream(ostream)
{
const bool is_terminal = IsStdoutATTY();
std::lock_guard<std::mutex> lock(get_mutex());
switch (level)
{
case logWARNING:
stream << (is_terminal ? YELLOW : "") << "[warn] ";
break;
case logERROR:
stream << (is_terminal ? RED : "") << "[error] ";
break;
case logDEBUG:
#ifndef NDEBUG
stream << (is_terminal ? MAGENTA : "") << "[debug] ";
#endif
break;
default: // logINFO:
stream << "[info] ";
break;
}
}
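// Default construction writes into the internal string buffer; ~Log() then routes the buffered text to std::cout or std::cerr.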
Log::Log(LogLevel level_) : Log(level_, buffer) {}
std::mutex &Log::get_mutex()
{
static std::mutex mtx;
return mtx;
}
/**
* Close down this logging instance.
* This destructor is responsible for flushing any buffered data,
* and printing a newline character (each logger object is responsible for only one line).
* Because sub-classes can replace the `stream` object, we need to check whether the
* buffered text should go to std::cerr/std::cout or directly to the replacement stream.
*/
Log::~Log()
{
std::lock_guard<std::mutex> lock(get_mutex());
const bool usestd = (&stream == &buffer);
if (!LogPolicy::GetInstance().IsMute())
{
const bool is_terminal = IsStdoutATTY();
if (usestd)
{
switch (level)
{
case logWARNING:
case logERROR:
std::cerr << buffer.str();
std::cerr << (is_terminal ? COL_RESET : "");
std::cerr << std::endl;
break;
case logDEBUG:
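// In release builds (NDEBUG) debug output is dropped here; otherwise it intentionally falls through and is printed like logINFO.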
#ifdef NDEBUG
break;
#endif
case logINFO:
default:
std::cout << buffer.str();
std::cout << (is_terminal ? COL_RESET : "");
std::cout << std::endl;
break;
}
}
else
{
stream << (is_terminal ? COL_RESET : "");
stream << std::endl;
}
}
}
UnbufferedLog::UnbufferedLog(LogLevel level_)
: Log(level_, (level_ == logWARNING || level_ == logERROR) ? std::cerr : std::cout)
{
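// unitbuf flushes the underlying stream after every insertion, so partial output (e.g. progress updates) appears immediately.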
stream.flags(std::ios_base::unitbuf);
}
}
}
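For orientation, this is roughly how call sites use the new interface once the refactor is in place (a minimal sketch based on the usages in this diff; the progress-style use of UnbufferedLog is our own illustration):

#include "util/log.hpp"

int main()
{
    using namespace osrm::util;

    LogPolicy::GetInstance().Unmute();

    Log() << "starting up";                    // buffered info line, flushed to std::cout on destruction
    Log(logWARNING) << "something looks off";  // prefixed "[warn] ", written to std::cerr, yellow on a TTY
    Log(logERROR) << "something went wrong";   // prefixed "[error] ", written to std::cerr, red on a TTY

    // Unbuffered variant for output that should appear immediately, e.g. progress reporting.
    UnbufferedLog(logINFO) << "step 1 of 3 ...";

    return 0;
}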

View File

@ -1,6 +1,6 @@
#include "util/name_table.hpp" #include "util/name_table.hpp"
#include "util/exception.hpp" #include "util/exception.hpp"
#include "util/simple_logger.hpp" #include "util/log.hpp"
#include <algorithm> #include <algorithm>
#include <fstream> #include <fstream>
@ -32,9 +32,8 @@ NameTable::NameTable(const std::string &filename)
} }
else else
{ {
util::SimpleLogger().Write(logINFO) util::Log() << "list of street names is empty in construction of name table from: \""
<< "list of street names is empty in construction of name table from: \"" << filename << filename << "\"";
<< "\"";
} }
} }

View File

@ -1,95 +0,0 @@
#include "util/simple_logger.hpp"
#include "util/isatty.hpp"
#include <cstdio>
#include <iostream>
#include <mutex>
#include <string>
namespace osrm
{
namespace util
{
namespace
{
static const char COL_RESET[]{"\x1b[0m"};
static const char RED[]{"\x1b[31m"};
#ifndef NDEBUG
static const char YELLOW[]{"\x1b[33m"};
#endif
// static const char GREEN[] { "\x1b[32m"};
// static const char BLUE[] { "\x1b[34m"};
// static const char MAGENTA[] { "\x1b[35m"};
// static const char CYAN[] { "\x1b[36m"};
}
void LogPolicy::Unmute() { m_is_mute = false; }
void LogPolicy::Mute() { m_is_mute = true; }
bool LogPolicy::IsMute() const { return m_is_mute; }
LogPolicy &LogPolicy::GetInstance()
{
static LogPolicy runningInstance;
return runningInstance;
}
SimpleLogger::SimpleLogger() : level(logINFO) {}
std::mutex &SimpleLogger::get_mutex()
{
static std::mutex mtx;
return mtx;
}
std::ostringstream &SimpleLogger::Write(LogLevel lvl) noexcept
{
std::lock_guard<std::mutex> lock(get_mutex());
level = lvl;
os << "[";
switch (level)
{
case logWARNING:
os << "warn";
break;
case logDEBUG:
#ifndef NDEBUG
os << "debug";
#endif
break;
default: // logINFO:
os << "info";
break;
}
os << "] ";
return os;
}
SimpleLogger::~SimpleLogger()
{
std::lock_guard<std::mutex> lock(get_mutex());
if (!LogPolicy::GetInstance().IsMute())
{
const bool is_terminal = IsStdoutATTY();
switch (level)
{
case logWARNING:
std::cerr << (is_terminal ? RED : "") << os.str() << (is_terminal ? COL_RESET : "")
<< std::endl;
break;
case logDEBUG:
#ifndef NDEBUG
std::cout << (is_terminal ? YELLOW : "") << os.str() << (is_terminal ? COL_RESET : "")
<< std::endl;
#endif
break;
case logINFO:
default:
std::cout << os.str() << (is_terminal ? COL_RESET : "") << std::endl;
break;
}
}
}
}
}

View File

@ -43,9 +43,9 @@ BOOST_AUTO_TEST_CASE(io_nonexistent_file)
} }
catch (const osrm::util::exception &e) catch (const osrm::util::exception &e)
{ {
std::cout << e.what() << std::endl; const std::string expected("Error opening non_existent_test_io.tmp");
BOOST_REQUIRE(std::string(e.what()) == const std::string got(e.what());
"Error opening non_existent_test_io.tmp"); BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin()));
} }
} }
@ -71,9 +71,10 @@ BOOST_AUTO_TEST_CASE(file_too_small)
} }
catch (const osrm::util::exception &e) catch (const osrm::util::exception &e)
{ {
std::cout << e.what() << std::endl; const std::string expected(
BOOST_REQUIRE(std::string(e.what()) == "Error reading from file_too_small_test_io.tmp: Unexpected end of file");
"Error reading from file_too_small_test_io.tmp: Unexpected end of file"); const std::string got(e.what());
BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin()));
} }
} }
@ -98,9 +99,9 @@ BOOST_AUTO_TEST_CASE(io_corrupt_fingerprint)
} }
catch (const osrm::util::exception &e) catch (const osrm::util::exception &e)
{ {
std::cout << e.what() << std::endl; const std::string expected("Fingerprint mismatch in corrupt_fingerprint_file_test_io.tmp");
BOOST_REQUIRE(std::string(e.what()) == const std::string got(e.what());
"Fingerprint mismatch in corrupt_fingerprint_file_test_io.tmp"); BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin()));
} }
} }
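These assertions now check only that e.what() starts with the expected text (std::equal over the expected range) instead of requiring an exact match, which keeps the tests stable if the message gains a suffix such as the SOURCE_REF location text introduced elsewhere in this commit. An equivalent prefix check — shown here only as a sketch, with a helper name of our own choosing — can be written with std::string::compare, which also stays safe when the actual message is shorter than the expected prefix:

#include <exception>
#include <string>

// Hypothetical helper: true if the exception message begins with the expected prefix.
inline bool message_starts_with(const std::exception &e, const std::string &expected)
{
    const std::string got(e.what());
    return got.compare(0, expected.size(), expected) == 0;
}

// Usage inside a test body:
//   BOOST_REQUIRE(message_starts_with(e, "Error opening non_existent_test_io.tmp"));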