2021-03-09 22:24:35 +00:00
|
|
|
#include "nodedb.hpp"
|
2019-01-10 19:41:51 +00:00
|
|
|
|
2023-10-24 13:18:03 +00:00
|
|
|
#include "crypto/types.hpp"
|
|
|
|
#include "dht/kademlia.hpp"
|
|
|
|
#include "router_contact.hpp"
|
|
|
|
#include "util/time.hpp"
|
|
|
|
|
2023-10-19 21:59:57 +00:00
|
|
|
#include <algorithm>
|
|
|
|
#include <unordered_map>
|
|
|
|
#include <utility>
|
|
|
|
|
2018-06-13 13:09:19 +00:00
|
|
|
static const char skiplist_subdirs[] = "0123456789abcdef";
|
2018-08-02 23:36:34 +00:00
|
|
|
static const std::string RC_FILE_EXT = ".signed";
|
2018-05-30 00:40:02 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
namespace llarp
|
2018-12-10 23:29:58 +00:00
|
|
|
{
|
2021-02-02 14:35:40 +00:00
|
|
|
static void
|
|
|
|
EnsureSkiplist(fs::path nodedbDir)
|
2019-03-25 13:52:22 +00:00
|
|
|
{
|
2021-02-02 14:35:40 +00:00
|
|
|
if (not fs::exists(nodedbDir))
|
2019-03-25 13:52:22 +00:00
|
|
|
{
|
2021-02-02 14:35:40 +00:00
|
|
|
// if the old 'netdb' directory exists, move it to this one
|
|
|
|
fs::path parent = nodedbDir.parent_path();
|
|
|
|
fs::path old = parent / "netdb";
|
|
|
|
if (fs::exists(old))
|
|
|
|
fs::rename(old, nodedbDir);
|
2019-03-25 13:52:22 +00:00
|
|
|
else
|
2021-02-02 14:35:40 +00:00
|
|
|
fs::create_directory(nodedbDir);
|
2019-03-25 13:52:22 +00:00
|
|
|
}
|
2020-02-13 22:19:12 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
if (not fs::is_directory(nodedbDir))
|
2022-07-16 00:41:14 +00:00
|
|
|
throw std::runtime_error{fmt::format("nodedb {} is not a directory", nodedbDir)};
|
2020-02-13 22:19:12 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
for (const char& ch : skiplist_subdirs)
|
2019-09-12 18:19:25 +00:00
|
|
|
{
|
2021-02-02 14:35:40 +00:00
|
|
|
// this seems to be a problem on all targets
|
|
|
|
// perhaps cpp17::fs is just as screwed-up
|
|
|
|
// attempting to create a folder with no name
|
|
|
|
// what does this mean...?
|
|
|
|
if (!ch)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
fs::path sub = nodedbDir / std::string(&ch, 1);
|
|
|
|
fs::create_directory(sub);
|
2019-09-12 18:19:25 +00:00
|
|
|
}
|
2019-06-26 21:39:29 +00:00
|
|
|
}
|
2019-06-17 14:23:38 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
constexpr auto FlushInterval = 5min;
|
2018-06-13 12:58:51 +00:00
|
|
|
|
2023-10-12 13:43:07 +00:00
|
|
|
  /// Construct the node database.
  ///
  /// @param root        directory holding the skiplist subdirs of RC files;
  ///                    an empty path disables disk persistence elsewhere
  /// @param diskCaller  callable that queues a job onto the disk I/O thread
  /// @param r           owning router; stored by reference, must outlive us
  NodeDB::NodeDB(fs::path root, std::function<void(std::function<void()>)> diskCaller, Router* r)
      : router{*r}
      , m_Root{std::move(root)}
      , disk(std::move(diskCaller))
      , m_NextFlushAt{time_now_ms() + FlushInterval}
  {
    // create the on-disk directory layout up front
    EnsureSkiplist(m_Root);
  }
|
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
  /// Periodic driver: every FlushInterval, snapshot all known RCs on the
  /// event loop thread and write them out in one disk-thread job.
  void
  NodeDB::Tick(llarp_time_t now)
  {
    // a zero flush time means flushing is disabled
    if (m_NextFlushAt == 0s)
      return;

    if (now > m_NextFlushAt)
    {
      router.loop()->call([this]() {
        m_NextFlushAt += FlushInterval;
        // make copy of all rcs
        // (copied on the loop thread so the disk job below never touches
        // known_rcs concurrently)
        std::vector<RemoteRC> copy;

        for (const auto& item : known_rcs)
          copy.push_back(item.second);

        // flush them to disk in one big job
        // TODO: split this up? idk maybe some day...
        disk([this, data = std::move(copy)]() {
          for (const auto& rc : data)
            rc.write(get_path_by_pubkey(rc.router_id()));
        });
      });
    }
  }
|
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
fs::path
|
2023-10-12 13:43:07 +00:00
|
|
|
NodeDB::get_path_by_pubkey(RouterID pubkey) const
|
2021-02-02 14:35:40 +00:00
|
|
|
{
|
2022-02-17 18:44:31 +00:00
|
|
|
std::string hexString = oxenc::to_hex(pubkey.begin(), pubkey.end());
|
2021-02-02 14:35:40 +00:00
|
|
|
std::string skiplistDir;
|
2020-01-14 17:01:41 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
const llarp::RouterID r{pubkey};
|
|
|
|
std::string fname = r.ToString();
|
2019-06-17 14:23:38 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
skiplistDir += hexString[0];
|
|
|
|
fname += RC_FILE_EXT;
|
|
|
|
return m_Root / skiplistDir / fname;
|
2018-04-08 12:18:16 +00:00
|
|
|
}
|
2021-02-02 14:35:40 +00:00
|
|
|
|
2023-11-15 04:55:38 +00:00
|
|
|
bool
|
|
|
|
NodeDB::want_rc(const RouterID& rid) const
|
|
|
|
{
|
|
|
|
if (not router.is_service_node())
|
|
|
|
return true;
|
|
|
|
return registered_routers.count(rid);
|
|
|
|
}
|
|
|
|
|
2023-11-15 02:53:19 +00:00
|
|
|
void
|
|
|
|
NodeDB::set_bootstrap_routers(const std::set<RemoteRC>& rcs)
|
|
|
|
{
|
|
|
|
bootstraps.clear(); // this function really shouldn't be called more than once, but...
|
|
|
|
for (const auto& rc : rcs)
|
2023-11-27 16:31:43 +00:00
|
|
|
{
|
2023-11-15 02:53:19 +00:00
|
|
|
bootstraps.emplace(rc.router_id(), rc);
|
2023-11-27 16:31:43 +00:00
|
|
|
}
|
2023-11-15 02:53:19 +00:00
|
|
|
}
|
|
|
|
|
2023-11-28 00:07:29 +00:00
|
|
|
  /// Called in normal operation when the relay we fetched RCs from gives either a "bad"
  /// response or a timeout. Attempts to switch to a new relay as our RC source, using
  /// existing connections if possible, and respecting pinned edges.
  void
  NodeDB::rotate_rc_source()
  {
    auto conn_count = router.link_manager().get_num_connected();

    // This function makes no sense to be called if we have no connections...
    if (conn_count == 0)
      throw std::runtime_error{"Called rotate_rc_source with no connections, does not make sense!"};

    // We should not be in this function if client_known_routers isn't populated
    // (the sampling loops below also rely on size > 1 to terminate)
    if (client_known_routers.size() <= 1)
      throw std::runtime_error{"Cannot rotate RC source without RC source(s) to rotate to!"};

    RemoteRC new_source{};
    router.link_manager().get_random_connected(new_source);
    if (conn_count == 1)
    {
      // if we only have one connection, it must be current rc fetch source
      assert(new_source.router_id() == rc_fetch_source);

      if (pinned_edges.size() == 1)
      {
        // only one pinned edge set, use it even though it gave unsatisfactory RCs
        assert(rc_fetch_source == *(pinned_edges.begin()));
        log::warning(
            logcat,
            "Single pinned edge {} gave bad RC response; still using it despite this.",
            rc_fetch_source);
        return;
      }

      // only one connection, choose a new relay to connect to for rc fetching
      // (re-sample until we draw something other than the current source;
      // terminates because client_known_routers has at least 2 entries)

      RouterID r = rc_fetch_source;
      while (r == rc_fetch_source)
      {
        std::sample(client_known_routers.begin(), client_known_routers.end(), &r, 1, csrng);
      }
      rc_fetch_source = std::move(r);
      return;
    }

    // choose one of our other existing connections to use as the RC fetch source
    while (new_source.router_id() == rc_fetch_source)
    {
      router.link_manager().get_random_connected(new_source);
    }
    rc_fetch_source = new_source.router_id();
  }
|
2023-11-17 07:41:42 +00:00
|
|
|
|
|
|
|
  // TODO: trust model
  /// Store a batch of RCs received from `source`, keeping only entries newer
  /// than what we already hold, then record `timestamp` as the relay-side
  /// update time for the next incremental fetch.
  void
  NodeDB::ingest_rcs(RouterID source, std::vector<RemoteRC> rcs, rc_time timestamp)
  {
    (void)source;  // unused until the trust model lands

    // TODO: if we don't currently have a "trusted" relay we've been fetching from,
    // this will be a full list of RCs. We need to first check if it aligns closely
    // with our trusted RouterID list, then replace our RCs with the incoming set.

    for (auto& rc : rcs)
      put_rc_if_newer(std::move(rc), timestamp);

    // TODO: if we have a "trusted" relay we've been fetching from, this will be
    // an incremental update to the RC list, so *after* insertion we check if the
    // RCs' RouterIDs closely match our trusted RouterID list.

    last_rc_update_relay_timestamp = timestamp;
  }
|
|
|
|
|
|
|
|
// TODO: trust model
|
|
|
|
void
|
|
|
|
NodeDB::ingest_router_ids(RouterID source, std::vector<RouterID> ids)
|
|
|
|
{
|
|
|
|
router_id_fetch_responses[source] = std::move(ids);
|
|
|
|
|
|
|
|
router_id_response_count++;
|
|
|
|
if (router_id_response_count == router_id_fetch_sources.size())
|
|
|
|
{
|
2023-11-25 00:40:51 +00:00
|
|
|
// TODO: reconcile all the responses, for now just insert all
|
|
|
|
for (const auto& [rid, responses] : router_id_fetch_responses)
|
|
|
|
{
|
|
|
|
// TODO: empty == failure, handle that case
|
|
|
|
for (const auto& response : responses)
|
|
|
|
{
|
|
|
|
client_known_routers.insert(std::move(response));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
router_id_fetch_in_progress = false;
|
2023-11-17 07:41:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
  /// Ask our current RC fetch source for updates: collect the RouterIDs whose
  /// stored RC is older than OUTDATED_AGE and hand them, plus the last known
  /// relay-side update time, to the link manager.
  void
  NodeDB::fetch_rcs()
  {
    std::vector<RouterID> needed;

    const auto now =
        std::chrono::time_point_cast<std::chrono::seconds>(std::chrono::system_clock::now());
    for (const auto& [rid, rc] : known_rcs)
    {
      if (now - rc.timestamp() > RouterContact::OUTDATED_AGE)
        needed.push_back(rid);
    }

    router.link_manager().fetch_rcs(
        rc_fetch_source, last_rc_update_relay_timestamp, std::move(needed));
  }
|
|
|
|
|
2023-11-25 00:40:51 +00:00
|
|
|
  /// Kick off a RouterID-list fetch from every selected source relay, unless
  /// a fetch round is already in flight. Selects sources first if none are set.
  void
  NodeDB::fetch_router_ids()
  {
    if (router_id_fetch_in_progress)
      return;
    if (router_id_fetch_sources.empty())
      select_router_id_sources();

    // if we *still* don't have fetch sources, we can't exactly fetch...
    if (router_id_fetch_sources.empty())
    {
      log::info(logcat, "Attempting to fetch RouterIDs, but have no source from which to do so.");
      return;
    }

    // reset the response bookkeeping consumed by ingest_router_ids()
    router_id_fetch_in_progress = true;
    router_id_response_count = 0;
    router_id_fetch_responses.clear();
    for (const auto& rid : router_id_fetch_sources)
      router.link_manager().fetch_router_ids(rid);
  }
|
|
|
|
|
|
|
|
void
|
|
|
|
NodeDB::select_router_id_sources(std::unordered_set<RouterID> excluded)
|
|
|
|
{
|
|
|
|
// TODO: bootstrapping should be finished before this is called, so this
|
|
|
|
// shouldn't happen; need to make sure that's the case.
|
|
|
|
if (client_known_routers.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
// keep using any we've been using, but remove `excluded` ones
|
|
|
|
for (const auto& r : excluded)
|
|
|
|
router_id_fetch_sources.erase(r);
|
|
|
|
|
|
|
|
// only know so many routers, so no need to randomize
|
|
|
|
if (client_known_routers.size() <= (ROUTER_ID_SOURCE_COUNT + excluded.size()))
|
|
|
|
{
|
|
|
|
for (const auto& r : client_known_routers)
|
|
|
|
{
|
|
|
|
if (excluded.count(r))
|
|
|
|
continue;
|
|
|
|
router_id_fetch_sources.insert(r);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// select at random until we have chosen enough
|
|
|
|
while (router_id_fetch_sources.size() < ROUTER_ID_SOURCE_COUNT)
|
|
|
|
{
|
|
|
|
RouterID r;
|
|
|
|
std::sample(client_known_routers.begin(), client_known_routers.end(), &r, 1, csrng);
|
|
|
|
if (excluded.count(r) == 0)
|
|
|
|
router_id_fetch_sources.insert(r);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-15 02:53:19 +00:00
|
|
|
void
|
|
|
|
NodeDB::set_router_whitelist(
|
|
|
|
const std::vector<RouterID>& whitelist,
|
|
|
|
const std::vector<RouterID>& greylist,
|
|
|
|
const std::vector<RouterID>& greenlist)
|
|
|
|
{
|
|
|
|
if (whitelist.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
registered_routers.clear();
|
|
|
|
registered_routers.insert(whitelist.begin(), whitelist.end());
|
|
|
|
registered_routers.insert(greylist.begin(), greylist.end());
|
|
|
|
registered_routers.insert(greenlist.begin(), greenlist.end());
|
|
|
|
|
|
|
|
router_whitelist.clear();
|
|
|
|
router_whitelist.insert(whitelist.begin(), whitelist.end());
|
|
|
|
router_greylist.clear();
|
|
|
|
router_greylist.insert(greylist.begin(), greylist.end());
|
|
|
|
router_greenlist.clear();
|
|
|
|
router_greenlist.insert(greenlist.begin(), greenlist.end());
|
|
|
|
|
|
|
|
log::info(
|
|
|
|
logcat, "lokinet service node list now has ", router_whitelist.size(), " active routers");
|
|
|
|
}
|
|
|
|
|
|
|
|
std::optional<RouterID>
|
|
|
|
NodeDB::get_random_whitelist_router() const
|
|
|
|
{
|
|
|
|
const auto sz = router_whitelist.size();
|
|
|
|
if (sz == 0)
|
|
|
|
return std::nullopt;
|
|
|
|
auto itr = router_whitelist.begin();
|
|
|
|
if (sz > 1)
|
|
|
|
std::advance(itr, randint() % sz);
|
|
|
|
return *itr;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
NodeDB::is_connection_allowed(const RouterID& remote) const
|
|
|
|
{
|
2023-11-27 18:28:45 +00:00
|
|
|
if (pinned_edges.size() && pinned_edges.count(remote) == 0 && bootstraps.count(remote) == 0)
|
2023-11-15 02:53:19 +00:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (not router.is_service_node())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return router_whitelist.count(remote) or router_greylist.count(remote);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
NodeDB::is_first_hop_allowed(const RouterID& remote) const
|
|
|
|
{
|
|
|
|
if (pinned_edges.size() && pinned_edges.count(remote) == 0)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
  /// Load every .signed RC file under the skiplist subdirectories into
  /// known_rcs, collecting unparsable or expired files and deleting them
  /// after the scan.
  void
  NodeDB::load_from_disk()
  {
    // no root configured: disk persistence disabled
    if (m_Root.empty())
      return;

    // files found to be junk or expired; removed after the full scan
    std::set<fs::path> purge;

    const auto now = time_now_ms();

    for (const char& ch : skiplist_subdirs)
    {
      // skiplist_subdirs is NUL-terminated; skip the terminator character
      if (!ch)
        continue;
      std::string p;
      p += ch;
      fs::path sub = m_Root / p;

      llarp::util::IterDir(sub, [&](const fs::path& f) -> bool {
        // skip files that are not suffixed with .signed
        if (not(fs::is_regular_file(f) and f.extension() == RC_FILE_EXT))
          return true;

        RemoteRC rc{};

        if (not rc.read(f))
        {
          // try loading it, purge it if it is junk
          purge.emplace(f);
          return true;
        }

        if (rc.is_expired(now))
        {
          // rc expired dont load it and purge it later
          purge.emplace(f);
          return true;
        }

        known_rcs.emplace(rc.router_id(), rc);
        // TODO: the list of relays should be maintained and stored separately from
        // the RCs, as we keep older RCs around in case we go offline and need to
        // bootstrap, but they shouldn't be in the "good relays" list.
        client_known_routers.insert(rc.router_id());

        // returning true tells IterDir to keep iterating
        return true;
      });
    }

    if (not purge.empty())
    {
      log::warning(logcat, "removing {} invalid RCs from disk", purge.size());

      for (const auto& fpath : purge)
        fs::remove(fpath);
    }
  }
|
2021-02-02 14:35:40 +00:00
|
|
|
|
|
|
|
void
|
2023-10-12 13:43:07 +00:00
|
|
|
NodeDB::save_to_disk() const
|
2018-05-22 15:54:19 +00:00
|
|
|
{
|
2021-04-02 15:10:37 +00:00
|
|
|
if (m_Root.empty())
|
|
|
|
return;
|
|
|
|
|
2023-10-12 13:43:07 +00:00
|
|
|
router.loop()->call([this]() {
|
2023-11-15 19:27:54 +00:00
|
|
|
for (const auto& item : known_rcs)
|
|
|
|
item.second.write(get_path_by_pubkey(item.first));
|
2023-10-12 13:43:07 +00:00
|
|
|
});
|
2018-04-08 12:18:16 +00:00
|
|
|
}
|
2018-04-30 16:14:20 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
bool
|
2023-11-27 18:28:45 +00:00
|
|
|
NodeDB::has_rc(RouterID pk) const
|
2018-09-11 15:53:54 +00:00
|
|
|
{
|
2023-11-15 19:27:54 +00:00
|
|
|
return known_rcs.count(pk);
|
2018-09-11 15:53:54 +00:00
|
|
|
}
|
|
|
|
|
2023-11-02 12:30:38 +00:00
|
|
|
std::optional<RemoteRC>
|
2023-10-12 13:43:07 +00:00
|
|
|
NodeDB::get_rc(RouterID pk) const
|
2018-06-25 15:12:08 +00:00
|
|
|
{
|
2023-11-15 19:27:54 +00:00
|
|
|
const auto itr = known_rcs.find(pk);
|
2023-10-12 18:46:31 +00:00
|
|
|
|
2023-11-15 19:27:54 +00:00
|
|
|
if (itr == known_rcs.end())
|
2023-11-15 02:53:19 +00:00
|
|
|
return std::nullopt;
|
2023-10-12 18:46:31 +00:00
|
|
|
|
2023-11-15 19:27:54 +00:00
|
|
|
return itr->second;
|
2018-06-21 09:33:23 +00:00
|
|
|
}
|
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
  /// Forget the RC for `pk`: erase it from memory (on the event loop thread)
  /// and queue deletion of its on-disk file via the disk thread.
  void
  NodeDB::remove_router(RouterID pk)
  {
    router.loop()->call([this, pk]() {
      known_rcs.erase(pk);
      remove_many_from_disk_async({pk});
    });
  }
|
2018-06-07 09:36:30 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
void
|
2023-11-27 16:31:43 +00:00
|
|
|
NodeDB::remove_stale_rcs()
|
2018-06-07 09:36:30 +00:00
|
|
|
{
|
2023-11-27 16:31:43 +00:00
|
|
|
auto cutoff_time =
|
|
|
|
std::chrono::time_point_cast<std::chrono::seconds>(std::chrono::system_clock::now());
|
|
|
|
cutoff_time -= router.is_service_node() ? RouterContact::OUTDATED_AGE : RouterContact::LIFETIME;
|
|
|
|
for (auto itr = known_rcs.begin(); itr != known_rcs.end();)
|
|
|
|
{
|
|
|
|
if (cutoff_time > itr->second.timestamp())
|
|
|
|
{
|
|
|
|
log::info(logcat, "Pruning RC for {}, as it is too old to keep.", itr->first);
|
|
|
|
known_rcs.erase(itr);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
itr++;
|
|
|
|
}
|
2018-06-07 09:36:30 +00:00
|
|
|
}
|
2018-06-13 12:58:51 +00:00
|
|
|
|
2023-11-15 04:55:38 +00:00
|
|
|
bool
|
2023-11-17 07:41:42 +00:00
|
|
|
NodeDB::put_rc(RemoteRC rc, rc_time now)
|
2020-04-02 17:56:13 +00:00
|
|
|
{
|
2023-11-15 04:55:38 +00:00
|
|
|
const auto& rid = rc.router_id();
|
|
|
|
if (not want_rc(rid))
|
|
|
|
return false;
|
2023-11-15 19:27:54 +00:00
|
|
|
known_rcs.erase(rid);
|
|
|
|
known_rcs.emplace(rid, std::move(rc));
|
2023-11-17 07:41:42 +00:00
|
|
|
last_rc_update_times[rid] = now;
|
2023-11-15 04:55:38 +00:00
|
|
|
return true;
|
2020-04-02 17:56:13 +00:00
|
|
|
}
|
2018-04-08 12:18:16 +00:00
|
|
|
|
2021-02-02 14:35:40 +00:00
|
|
|
  /// Number of RCs currently held in memory. Reads known_rcs via call_get on
  /// the event loop thread -- presumably to serialize with the writers that
  /// mutate it on that thread (Tick, remove_router).
  size_t
  NodeDB::num_loaded() const
  {
    return router.loop()->call_get([this]() { return known_rcs.size(); });
  }
|
2018-09-13 16:41:53 +00:00
|
|
|
|
2023-11-15 04:55:38 +00:00
|
|
|
bool
|
2023-11-17 07:41:42 +00:00
|
|
|
NodeDB::put_rc_if_newer(RemoteRC rc, rc_time now)
|
2018-11-14 18:02:27 +00:00
|
|
|
{
|
2023-11-15 19:27:54 +00:00
|
|
|
auto itr = known_rcs.find(rc.router_id());
|
|
|
|
if (itr == known_rcs.end() or itr->second.other_is_newer(rc))
|
2023-11-15 04:55:38 +00:00
|
|
|
{
|
2023-11-17 07:41:42 +00:00
|
|
|
return put_rc(std::move(rc), now);
|
2023-11-15 04:55:38 +00:00
|
|
|
}
|
|
|
|
return false;
|
2018-11-14 18:02:27 +00:00
|
|
|
}
|
2021-02-02 14:35:40 +00:00
|
|
|
|
|
|
|
void
|
2023-10-12 13:43:07 +00:00
|
|
|
NodeDB::remove_many_from_disk_async(std::unordered_set<RouterID> remove) const
|
2018-11-14 18:02:27 +00:00
|
|
|
{
|
2021-04-02 15:10:37 +00:00
|
|
|
if (m_Root.empty())
|
|
|
|
return;
|
2021-02-02 14:35:40 +00:00
|
|
|
// build file list
|
|
|
|
std::set<fs::path> files;
|
|
|
|
for (auto id : remove)
|
2018-11-14 18:02:27 +00:00
|
|
|
{
|
2023-10-12 13:43:07 +00:00
|
|
|
files.emplace(get_path_by_pubkey(std::move(id)));
|
2018-11-14 18:02:27 +00:00
|
|
|
}
|
2021-02-02 14:35:40 +00:00
|
|
|
// remove them from the disk via the diskio thread
|
|
|
|
disk([files]() {
|
|
|
|
for (auto fpath : files)
|
|
|
|
fs::remove(fpath);
|
|
|
|
});
|
2018-11-14 18:02:27 +00:00
|
|
|
}
|
|
|
|
|
2023-11-02 12:30:38 +00:00
|
|
|
  /// Return the known RC whose key is XOR-closest to `location`; runs on the
  /// event loop thread via call_get. Returns a default-constructed RemoteRC
  /// (zero router id) when nothing is known -- callers must check.
  RemoteRC
  NodeDB::find_closest_to(llarp::dht::Key_t location) const
  {
    return router.loop()->call_get([this, location]() -> RemoteRC {
      RemoteRC rc;
      const llarp::dht::XorMetric compare(location);

      VisitAll([&rc, compare](const auto& otherRC) {
        const auto& rid = rc.router_id();

        // first candidate always wins (rid still zero); afterwards keep
        // whichever is closer under the XOR metric
        if (rid.IsZero() || compare(dht::Key_t{otherRC.router_id()}, dht::Key_t{rid}))
        {
          rc = otherRC;
          return;
        }
      });
      return rc;
    });
  }
|
2019-03-11 13:58:31 +00:00
|
|
|
|
2023-11-02 12:30:38 +00:00
|
|
|
  /// Return up to `numRouters` known RCs ordered XOR-closest-first to
  /// `location`; runs on the event loop thread via call_get.
  std::vector<RemoteRC>
  NodeDB::find_many_closest_to(llarp::dht::Key_t location, uint32_t numRouters) const
  {
    return router.loop()->call_get([this, location, numRouters]() -> std::vector<RemoteRC> {
      // sort pointers into known_rcs rather than copying every RC
      std::vector<const RemoteRC*> all;

      all.reserve(known_rcs.size());
      for (auto& entry : known_rcs)
      {
        all.push_back(&entry.second);
      }

      // only the first numRouters entries need to be ordered
      auto it_mid = numRouters < all.size() ? all.begin() + numRouters : all.end();
      std::partial_sort(
          all.begin(), it_mid, all.end(), [compare = dht::XorMetric{location}](auto* a, auto* b) {
            return compare(*a, *b);
          });

      // copy out just the winners
      std::vector<RemoteRC> closest;
      closest.reserve(numRouters);
      for (auto it = all.begin(); it != it_mid; ++it)
        closest.push_back(**it);

      return closest;
    });
  }
|
2021-02-02 14:35:40 +00:00
|
|
|
} // namespace llarp
|