lokinet/llarp/service/endpoint.cpp

1722 lines
44 KiB
C++
Raw Normal View History

#include "endpoint.hpp"
#include "auth.hpp"
#include "endpoint_state.hpp"
#include "endpoint_util.hpp"
#include "info.hpp"
#include "outbound_context.hpp"
#include "protocol.hpp"
#include "protocol_type.hpp"
#include <llarp/dht/key.hpp>
#include <llarp/link/contacts.hpp>
#include <llarp/link/tunnel.hpp>
2023-10-16 16:55:51 +00:00
#include <llarp/net/ip.hpp>
#include <llarp/net/ip_range.hpp>
#include <llarp/nodedb.hpp>
2023-10-16 16:55:51 +00:00
#include <llarp/path/path.hpp>
#include <llarp/profiling.hpp>
2023-01-24 18:14:00 +00:00
#include <llarp/router/route_poker.hpp>
#include <llarp/router/router.hpp>
#include <llarp/util/logging.hpp>
#include <llarp/util/priority_queue.hpp>
#include <optional>
2023-01-24 18:14:00 +00:00
#include <type_traits>
2019-07-30 23:42:13 +00:00
#include <utility>
#include <variant>
2021-03-12 17:41:48 +00:00
2023-10-16 16:55:51 +00:00
namespace llarp::service
{
2023-10-16 16:55:51 +00:00
// File-local log category used by this translation unit.
static auto logcat = log::Cat("endpoint");
// Construct an endpoint owned by `parent`, building 3 paths of the default
// length via router `r`; queues are fixed-capacity (512 entries each).
Endpoint::Endpoint(Router* r, Context* parent)
    : path::Builder{r, 3, path::DEFAULT_LEN}
    , context{parent}
    , _inbound_queue{512}
    , _send_queue{512}
    , _recv_event_queue{512}
    , _introset_lookup_filter{5s}
{
  auto state = std::make_unique<EndpointState>();
  state->router = r;
  state->name = "endpoint";
  _state = std::move(state);

  _recv_event_queue.enable();

  // if (Loop()->MaybeGetUVWLoop())
  //   _tunnel_manager = std::make_unique<link::TunnelManager>(*this);
}
QUIC lokinet integration refactor Refactors how quic packets get handled: the actual tunnels now live in tunnel.hpp's TunnelManager which holds and manages all the quic<->tcp tunnelling. service::Endpoint now holds a TunnelManager rather than a quic::Server. We only need one quic server, but we need a separate quic client instance per outgoing quic tunnel, and TunnelManager handles all that glue now. Adds QUIC packet handling to get to the right tunnel code. This required multiplexing incoming quic packets, as follows: Adds a very small quic tunnel packet header of 4 bytes: [1, SPORT, ECN] for client->server packets, where SPORT is our source "port" (really: just a uint16_t unique quic instance identifier) or [2, DPORT, ECN] for server->client packets where the DPORT is the SPORT from above. (This also reworks ECN bits to get properly carried over lokinet.) We don't need a destination/source port for the server-side because there is only ever one quic server (and we know we're going to it when the first byte of the header is 1). Removes the config option for quic exposing ports; a full lokinet will simply accept anything incoming on quic and tunnel it to the requested port on the the local endpoint IP (this handler will come in a following commit). Replace ConvoTags with full addresses: we need to carry the port, as well, which the ConvoTag can't give us, so change those to more general SockAddrs from which we can extract both the ConvoTag *and* the port. Add a pending connection queue along with new quic-side handlers to call when a stream becomes available (TunnelManager uses this to wire up pending incoming conns with quic streams as streams open up). Completely get rid of tunnel_server/tunnel_client.cpp code; it is now moved to tunnel.hpp. Add listen()/forget() methods in TunnelManager for setting up quic listening sockets (for liblokinet usage). Add open()/close() methods in TunnelManager for spinning up new quic clients for outgoing quic connections.
2021-03-23 19:26:32 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::Configure(const NetworkConfig& conf, [[maybe_unused]] const DnsConfig& dnsConf)
{
if (conf.m_Paths.has_value())
numDesiredPaths = *conf.m_Paths;
if (conf.m_Hops.has_value())
numHops = *conf.m_Hops;
2023-10-16 16:55:51 +00:00
conf.m_ExitMap.ForEachEntry(
[&](const IPRange& range, const service::Address& addr) { MapExitRange(range, addr); });
for (auto [exit, auth] : conf.m_ExitAuths)
{
2023-10-16 16:55:51 +00:00
SetAuthInfoForEndpoint(exit, auth);
}
2023-10-16 16:55:51 +00:00
conf.m_LNSExitMap.ForEachEntry([&](const IPRange& range, const std::string& name) {
std::optional<AuthInfo> auth;
const auto itr = conf.m_LNSExitAuths.find(name);
if (itr != conf.m_LNSExitAuths.end())
auth = itr->second;
_startup_ons_mappings[name] = std::make_pair(range, auth);
});
2023-10-16 16:55:51 +00:00
return _state->Configure(conf);
}
2023-10-16 16:55:51 +00:00
// True if a service lookup for `addr` is already in flight.
bool
Endpoint::HasPendingPathToService(const Address& addr) const
{
  return _state->pending_service_lookups.count(addr) != 0;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::is_ready() const
{
const auto now = llarp::time_now_ms();
if (intro_set().intros.empty())
return false;
if (intro_set().IsExpired(now))
return false;
return true;
}
2023-10-16 16:55:51 +00:00
// True if a router lookup for `remote` is already in flight.
bool
Endpoint::HasPendingRouterLookup(const RouterID remote) const
{
  return _state->pending_routers.count(remote) != 0;
}
2018-08-09 19:02:17 +00:00
2023-10-16 16:55:51 +00:00
// Map a convo tag back to the remote it belongs to: either a service address
// (tracked session) or a snode RouterID (tag matches the session's current
// path id). Returns nullopt for unknown tags.
std::optional<std::variant<Address, RouterID>>
Endpoint::GetEndpointWithConvoTag(ConvoTag tag) const
{
  if (auto itr = Sessions().find(tag); itr != Sessions().end())
    return itr->second.remote.Addr();

  for (const auto& [rid, session] : _state->snode_sessions)
  {
    if (const auto maybe_path = session->CurrentPath();
        maybe_path and ConvoTag{maybe_path->as_array()} == tag)
      return rid;
  }

  return std::nullopt;
}
2023-10-16 16:55:51 +00:00
void
Endpoint::map_exit(
std::string name,
std::string token,
std::vector<IPRange> ranges,
std::function<void(bool, std::string)> result_handler)
{
if (ranges.empty())
{
2023-10-16 16:55:51 +00:00
result_handler(false, "no ranges provided");
return;
}
2023-10-16 16:55:51 +00:00
lookup_name(
name,
[ptr = std::static_pointer_cast<Endpoint>(GetSelf()),
name,
auth = AuthInfo{token},
ranges,
result_handler,
poker = router()->route_poker()](oxen::quic::message m) mutable {
if (m)
{
std::string name;
try
{
oxenc::bt_dict_consumer btdc{m.body()};
name = btdc.require<std::string>("NAME");
}
catch (...)
{
log::warning(link_cat, "Failed to parse find name response!");
throw;
}
if (auto saddr = service::Address(); saddr.FromString(name))
{
ptr->SetAuthInfoForEndpoint(saddr, auth);
ptr->MarkAddressOutbound(saddr);
auto result = ptr->EnsurePathToService(
saddr,
[ptr, name, ranges, result_handler, poker](auto addr, OutboundContext* ctx) {
2023-10-16 16:55:51 +00:00
if (ctx == nullptr)
{
result_handler(false, "could not establish flow to {}"_format(name));
return;
}
// make a lambda that sends the reply after doing auth
auto apply_result = [ptr, poker, addr, result_handler, ranges](
std::string result, bool success) {
if (success)
{
for (const auto& range : ranges)
ptr->MapExitRange(range, addr);
if (poker)
poker->put_up();
2023-10-16 16:55:51 +00:00
result_handler(true, result);
}
result_handler(false, result);
};
2023-10-16 16:55:51 +00:00
ctx->send_auth_async(apply_result);
2023-10-16 16:55:51 +00:00
},
ptr->PathAlignmentTimeout());
if (not result)
result_handler(false, "Could not build path to {}"_format(name));
}
}
else
{
result_handler(false, "Exit {} not found!"_format(name));
}
});
}
void
Endpoint::LookupServiceAsync(
std::string name,
std::string service,
std::function<void(std::vector<dns::SRVData>)> resultHandler)
{
// handles when we aligned to a loki address
auto handleGotPathToService = [resultHandler, service, this](auto addr) {
// we can probably get this info before we have a path to them but we do this after we
// have a path so when we send the response back they can send shit to them immediately
const auto& container = _state->remote_sessions;
if (auto itr = container.find(addr); itr != container.end())
{
// parse the stuff we need from this guy
resultHandler(itr->second->GetCurrentIntroSet().GetMatchingSRVRecords(service));
return;
}
2023-10-16 16:55:51 +00:00
resultHandler({});
};
2019-07-30 23:42:13 +00:00
2023-10-16 16:55:51 +00:00
// handles when we resolved a .snode
auto handleResolvedSNodeName = [resultHandler, nodedb = router()->node_db()](auto router_id) {
std::vector<dns::SRVData> result{};
2023-10-16 16:55:51 +00:00
if (auto maybe_rc = nodedb->get_rc(router_id))
{
result = maybe_rc->srvRecords; // TODO: RouterContact has no SRV records
}
2023-10-16 16:55:51 +00:00
resultHandler(std::move(result));
};
2023-10-16 16:55:51 +00:00
// handles when we got a path to a remote thing
auto handleGotPathTo = [handleGotPathToService, handleResolvedSNodeName, resultHandler](
auto maybe_tag, auto address) {
if (not maybe_tag)
2023-01-24 18:14:00 +00:00
{
2023-10-16 16:55:51 +00:00
resultHandler({});
2023-01-24 18:14:00 +00:00
return;
}
2023-10-16 16:55:51 +00:00
if (auto* addr = std::get_if<Address>(&address))
{
// .loki case
handleGotPathToService(*addr);
}
else if (auto* router_id = std::get_if<RouterID>(&address))
{
// .snode case
handleResolvedSNodeName(*router_id);
}
else
{
// fallback case
// XXX: never should happen but we'll handle it anyways
resultHandler({});
}
};
2023-10-16 16:55:51 +00:00
// handles when we know a long address of a remote resource
auto handleGotAddress = [resultHandler, handleGotPathTo, this](AddressVariant_t address) {
// we will attempt a build to whatever we looked up
const auto result = EnsurePathTo(
address,
[address, handleGotPathTo](auto maybe_tag) { handleGotPathTo(maybe_tag, address); },
PathAlignmentTimeout());
2023-01-24 18:14:00 +00:00
2023-10-16 16:55:51 +00:00
// on path build start fail short circuit
if (not result)
2022-04-16 16:41:34 +00:00
resultHandler({});
2023-10-16 16:55:51 +00:00
};
2022-04-16 16:41:34 +00:00
2023-10-16 16:55:51 +00:00
// look up this name async and start the entire chain of events
lookup_name(name, [handleGotAddress, resultHandler](oxen::quic::message m) mutable {
if (m)
{
std::string name;
try
2022-04-16 16:41:34 +00:00
{
2023-10-16 16:55:51 +00:00
oxenc::bt_dict_consumer btdc{m.body()};
name = btdc.require<std::string>("NAME");
2022-04-16 16:41:34 +00:00
}
2023-10-16 16:55:51 +00:00
catch (...)
2022-04-16 16:41:34 +00:00
{
2023-10-16 16:55:51 +00:00
log::warning(link_cat, "Failed to parse find name response!");
throw;
2022-04-16 16:41:34 +00:00
}
2023-10-16 16:55:51 +00:00
if (auto saddr = service::Address(); saddr.FromString(name))
handleGotAddress(saddr);
2023-10-16 16:55:51 +00:00
if (auto rid = RouterID(); rid.FromString(name))
handleGotAddress(rid);
}
else
{
resultHandler({});
}
});
}
2023-10-16 16:55:51 +00:00
// True when any published introduction has already expired.
bool
Endpoint::IntrosetIsStale() const
{
  const auto now = llarp::time_now_ms();
  return intro_set().HasExpiredIntros(now);
}
2021-04-12 11:39:07 +00:00
2023-10-16 16:55:51 +00:00
// Build the status JSON for RPC: builder status plus exit map, identity
// address, readiness, per-service auth tokens, and endpoint-state fields.
util::StatusObject
Endpoint::ExtractStatus() const
{
  auto obj = path::Builder::ExtractStatus();
  obj["exitMap"] = _exit_map.ExtractStatus();
  obj["identity"] = _identity.pub.Addr().ToString();
  obj["networkReady"] = ReadyForNetwork();

  util::StatusObject authCodes;
  for (const auto& item : _remote_auth_infos)
    authCodes[item.first.ToString()] = item.second.token;
  obj["authCodes"] = authCodes;

  return _state->ExtractStatus(obj);
}
2023-10-16 16:55:51 +00:00
// Periodic upkeep: republish the introset when due, age out caches and
// sessions, and — once enough paths are established — resolve any ONS-named
// exit mappings left over from config.
void
Endpoint::Tick(llarp_time_t)
{
  const auto now = llarp::time_now_ms();
  path::Builder::Tick(now);

  // publish descriptors when due
  if (ShouldPublishDescriptors(now))
    regen_and_publish_introset();

  // decay / expire caches and bookkeeping
  _introset_lookup_filter.Decay(now);
  _state->nameCache.Decay(now);
  EndpointUtil::ExpireSNodeSessions(now, _state->snode_sessions);
  EndpointUtil::ExpirePendingRouterLookups(now, _state->pending_routers);

  // session lifecycle
  EndpointUtil::DeregisterDeadSessions(now, _state->dead_sessions);
  EndpointUtil::TickRemoteSessions(
      now, _state->remote_sessions, _state->dead_sessions, Sessions());
  EndpointUtil::ExpireConvoSessions(now, Sessions());

  // with enough built paths, resolve ONS names from [network] config
  if (NumInStatus(path::ePathEstablished) > 1)
  {
    for (const auto& item : _startup_ons_mappings)
    {
      const auto& name = item.first;
      lookup_name(name, [this, name, info = item.second](oxen::quic::message m) mutable {
        if (not m)
          return;
        std::string result;
        try
        {
          oxenc::bt_dict_consumer btdc{m.body()};
          result = btdc.require<std::string>("NAME");
        }
        catch (...)
        {
          log::warning(link_cat, "Failed to parse find name response!");
          throw;
        }

        const auto maybe_range = info.first;
        const auto maybe_auth = info.second;

        // resolved: this mapping is no longer pending
        _startup_ons_mappings.erase(name);

        if (auto saddr = service::Address(); saddr.FromString(result))
        {
          if (maybe_range.has_value())
            _exit_map.Insert(*maybe_range, saddr);
          if (maybe_auth.has_value())
            SetAuthInfoForEndpoint(saddr, *maybe_auth);
        }
      });
    }
  }
}
// Shut the endpoint down: tear down all remote (.loki) sessions, then all
// snode sessions, then stop the underlying path builder; returns the
// builder's stop result.
bool
Endpoint::Stop()
{
  // stop remote sessions
  log::debug(logcat, "Endpoint stopping remote sessions.");
  EndpointUtil::StopRemoteSessions(_state->remote_sessions);
  // stop snode sessions
  log::debug(logcat, "Endpoint stopping snode sessions.");
  EndpointUtil::StopSnodeSessions(_state->snode_sessions);
  log::debug(logcat, "Endpoint stopping its path builder.");
  return path::Builder::Stop();
}
// Generate a transaction id for lookups.
// NOTE(review): purely random with no check against outstanding txids —
// presumably collisions are tolerably rare; confirm callers can cope.
uint64_t
Endpoint::GenTXID()
{
  return randint();
}
// Human-readable name for logging: "<state name>:<identity name>".
std::string
Endpoint::Name() const
{
  auto display = _state->name;
  display += ":";
  display += _identity.pub.Name();
  return display;
}
2018-09-24 15:52:25 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::HasInboundConvo(const Address& addr) const
{
for (const auto& item : Sessions())
{
2023-10-16 16:55:51 +00:00
if (item.second.remote.Addr() == addr and item.second.inbound)
return true;
}
2023-10-16 16:55:51 +00:00
return false;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::HasOutboundConvo(const Address& addr) const
{
for (const auto& item : Sessions())
2018-07-18 03:10:21 +00:00
{
2023-10-16 16:55:51 +00:00
if (item.second.remote.Addr() == addr && not item.second.inbound)
return true;
}
2023-10-16 16:55:51 +00:00
return false;
}
2023-10-16 16:55:51 +00:00
void
Endpoint::PutSenderFor(const ConvoTag& tag, const ServiceInfo& info, bool inbound)
{
if (info.Addr().IsZero())
2018-07-16 03:32:13 +00:00
{
2023-10-16 16:55:51 +00:00
LogError(Name(), " cannot put invalid service info ", info, " T=", tag);
return;
2018-08-04 02:59:32 +00:00
}
2023-10-16 16:55:51 +00:00
auto itr = Sessions().find(tag);
if (itr == Sessions().end())
{
2023-10-16 16:55:51 +00:00
if (WantsOutboundSession(info.Addr()) and inbound)
{
2023-10-16 16:55:51 +00:00
LogWarn(
Name(),
" not adding sender for ",
info.Addr(),
" session is inbound and we want outbound T=",
tag);
return;
}
2023-10-16 16:55:51 +00:00
itr = Sessions().emplace(tag, Session{}).first;
itr->second.inbound = inbound;
itr->second.remote = info;
}
2023-10-16 16:55:51 +00:00
}
2023-10-16 16:55:51 +00:00
// Drop every convo tag associated with `remote`; returns how many were erased.
size_t
Endpoint::RemoveAllConvoTagsFor(service::Address remote)
{
  size_t removed = 0;
  auto& sessions = Sessions();
  for (auto itr = sessions.begin(); itr != sessions.end();)
  {
    if (itr->second.remote.Addr() == remote)
    {
      itr = sessions.erase(itr);
      ++removed;
    }
    else
      ++itr;
  }
  return removed;
}
// Fetch the sender info recorded for `tag` into `si`; false if unknown.
bool
Endpoint::GetSenderFor(const ConvoTag& tag, ServiceInfo& si) const
{
  const auto itr = Sessions().find(tag);
  if (itr == Sessions().end())
    return false;
  si = itr->second.remote;
  si.UpdateAddr();  // refresh the cached address from the keys
  return true;
}
// Store `intro` for convo `tag`, creating the session entry if needed.
void
Endpoint::PutIntroFor(const ConvoTag& tag, const Introduction& intro)
{
  Sessions()[tag].intro = intro;
}
// Fetch the intro recorded for `tag` into `intro`; false if unknown.
bool
Endpoint::GetIntroFor(const ConvoTag& tag, Introduction& intro) const
{
  const auto itr = Sessions().find(tag);
  if (itr == Sessions().end())
    return false;
  intro = itr->second.intro;
  return true;
}
// Store the reply intro for `tag`; silently ignored for unknown tags
// (unlike PutIntroFor, this never creates a session).
void
Endpoint::PutReplyIntroFor(const ConvoTag& tag, const Introduction& intro)
{
  if (auto itr = Sessions().find(tag); itr != Sessions().end())
    itr->second.replyIntro = intro;
}
// Fetch the reply intro recorded for `tag` into `intro`; false if unknown.
bool
Endpoint::GetReplyIntroFor(const ConvoTag& tag, Introduction& intro) const
{
  const auto itr = Sessions().find(tag);
  if (itr == Sessions().end())
    return false;
  intro = itr->second.replyIntro;
  return true;
}
// Collect into `tags` every convo tag whose session belongs to `addr`;
// delegates to the shared EndpointUtil helper.
bool
Endpoint::GetConvoTagsForService(const Address& addr, std::set<ConvoTag>& tags) const
{
  return EndpointUtil::GetConvoTagsForService(Sessions(), addr, tags);
}
// Fetch the cached shared secret for `tag` into `secret`; false if unknown.
bool
Endpoint::GetCachedSessionKeyFor(const ConvoTag& tag, SharedSecret& secret) const
{
  const auto itr = Sessions().find(tag);
  if (itr == Sessions().end())
    return false;
  secret = itr->second.sharedKey;
  return true;
}
2023-10-16 16:55:51 +00:00
// Cache shared secret `k` for `tag`, creating the session entry if needed.
void
Endpoint::PutCachedSessionKeyFor(const ConvoTag& tag, const SharedSecret& k)
{
  Sessions()[tag].sharedKey = k;
}
// Mark transmit activity on `tag` (no-op for unknown tags).
// Uses a single find() instead of the previous count()+operator[], which
// performed two map lookups for every call.
void
Endpoint::ConvoTagTX(const ConvoTag& tag)
{
  if (auto itr = Sessions().find(tag); itr != Sessions().end())
    itr->second.TX();
}
2018-08-09 19:02:17 +00:00
2023-10-16 16:55:51 +00:00
// Mark receive activity on `tag` (no-op for unknown tags).
// Uses a single find() instead of the previous count()+operator[], which
// performed two map lookups for every call.
void
Endpoint::ConvoTagRX(const ConvoTag& tag)
{
  if (auto itr = Sessions().find(tag); itr != Sessions().end())
    itr->second.RX();
}
bool
Endpoint::LoadKeyFile()
{
const auto& keyfile = _state->key_file;
if (!keyfile.empty())
{
2023-10-16 16:55:51 +00:00
_identity.EnsureKeys(keyfile, router()->key_manager()->needBackup());
}
2023-10-16 16:55:51 +00:00
else
{
_identity.RegenerateKeys();
}
return true;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::Start()
{
// this does network isolation
while (_state->on_init_callbacks.size())
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
if (_state->on_init_callbacks.front()())
_state->on_init_callbacks.pop_front();
else
{
LogWarn("Can't call init of network isolation");
2018-08-09 19:02:17 +00:00
return false;
2023-10-16 16:55:51 +00:00
}
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
return true;
}
// Keep this here (rather than the header) so that we don't need to include endpoint_state.hpp
// in endpoint.hpp for the unique_ptr member destructor.
Endpoint::~Endpoint() = default;
2018-08-09 19:02:17 +00:00
2023-10-16 16:55:51 +00:00
void
Endpoint::regen_and_publish_introset()
{
const auto now = llarp::time_now_ms();
_last_introset_regen_attempt = now;
std::set<Introduction, CompareIntroTimestamp> intros;
if (const auto maybe =
GetCurrentIntroductionsWithFilter([now](const service::Introduction& intro) -> bool {
return not intro.ExpiresSoon(now, path::INTRO_STALE_THRESHOLD);
}))
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
intros.insert(maybe->begin(), maybe->end());
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
else
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
LogWarn(
"could not publish descriptors for endpoint ",
Name(),
" because we couldn't get enough valid introductions");
BuildOne();
return;
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
intro_set().supported_protocols.clear();
// add supported ethertypes
if (HasIfAddr())
2019-02-21 16:45:33 +00:00
{
2023-10-16 16:55:51 +00:00
if (IPRange::V4MappedRange().Contains(GetIfAddr()))
2019-02-21 16:45:33 +00:00
{
2023-10-16 16:55:51 +00:00
intro_set().supported_protocols.push_back(ProtocolType::TrafficV4);
}
else
{
intro_set().supported_protocols.push_back(ProtocolType::TrafficV6);
2019-02-21 16:45:33 +00:00
}
2023-10-16 16:55:51 +00:00
// exit related stuffo
if (_state->is_exit_enabled)
{
intro_set().supported_protocols.push_back(ProtocolType::Exit);
intro_set().exit_policy = GetExitPolicy();
intro_set().owned_ranges = GetOwnedRanges();
}
}
// add quic ethertype if we have listeners set up
// if (auto* quic = GetQUICTunnel())
// {
// TODO:
// if (quic->hasListeners())
// intro_set().supported_protocols.push_back(ProtocolType::QUIC);
// }
2019-02-21 16:45:33 +00:00
2023-10-16 16:55:51 +00:00
intro_set().intros.clear();
for (auto& intro : intros)
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
if (intro_set().intros.size() < numDesiredPaths)
intro_set().intros.emplace_back(std::move(intro));
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
if (intro_set().intros.empty())
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
LogWarn("not enough intros to publish introset for ", Name());
if (ShouldBuildMore(now))
ManualRebuild(1);
return;
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
auto maybe = _identity.encrypt_and_sign_introset(intro_set(), now);
if (not maybe)
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
LogWarn("failed to generate introset for endpoint ", Name());
return;
2018-08-09 19:02:17 +00:00
}
2023-10-16 16:55:51 +00:00
if (publish_introset(*maybe))
2019-09-19 20:28:12 +00:00
{
2023-10-16 16:55:51 +00:00
LogInfo("(re)publishing introset for endpoint ", Name());
}
2023-10-16 16:55:51 +00:00
else
{
2023-10-16 16:55:51 +00:00
LogWarn("failed to publish intro set for endpoint ", Name());
2019-09-19 20:28:12 +00:00
}
2023-10-16 16:55:51 +00:00
}
2019-09-19 20:28:12 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::publish_introset(const EncryptedIntroSet& introset)
{
const auto paths = GetManyPathsWithUniqueEndpoints(
this, INTROSET_RELAY_REDUNDANCY, dht::Key_t{introset.derivedSigningKey.as_array()});
if (paths.size() != INTROSET_RELAY_REDUNDANCY)
{
2023-10-16 16:55:51 +00:00
LogWarn(
"Cannot publish intro set because we only have ",
paths.size(),
" paths, but need ",
INTROSET_RELAY_REDUNDANCY);
return false;
}
2023-10-16 16:55:51 +00:00
for (const auto& path : paths)
{
2023-10-16 16:55:51 +00:00
for (size_t i = 0; i < INTROSET_REQS_PER_RELAY; ++i)
2018-08-09 19:02:17 +00:00
{
2023-10-16 16:55:51 +00:00
router()->send_control_message(path->upstream(), "publish_intro", introset.bt_encode());
2018-08-09 19:02:17 +00:00
}
}
2023-10-16 16:55:51 +00:00
return true;
}
2023-10-16 16:55:51 +00:00
// Total number of distinct remotes we have sessions with (.loki + .snode).
size_t
Endpoint::UniqueEndpoints() const
{
  return _state->remote_sessions.size() + _state->snode_sessions.size();
}
// Timeout budget for introset publishes; currently unused (kept for reference).
[[maybe_unused]] constexpr auto PublishIntrosetTimeout = 20s;
2023-10-16 16:55:51 +00:00
// Reset the path builder and cascade the reset into every remote and snode
// session.
void
Endpoint::ResetInternalState()
{
  path::Builder::ResetInternalState();

  for (const auto& item : _state->remote_sessions)
    item.second->ResetInternalState();

  for (const auto& item : _state->snode_sessions)
    item.second->ResetInternalState();
}
2023-10-16 16:55:51 +00:00
// Decide whether it is time to (re)publish our introset: publishing must be
// enabled, and enough time must have passed since the last publish/attempt —
// using the short retry cooldown when our local introset has stale intros.
bool
Endpoint::ShouldPublishDescriptors(llarp_time_t now) const
{
  if (not _publish_introset)
    return false;

  const auto lastEventAt = std::max(_state->last_publish_attempt, _state->last_publish);
  const auto interval =
      _state->local_introset.HasStaleIntros(now, path::INTRO_STALE_THRESHOLD)
      ? IntrosetPublishRetryCooldown
      : IntrosetPublishInterval;
  return now >= lastEventAt + interval;
}
2018-09-18 14:48:06 +00:00
2023-10-16 16:55:51 +00:00
// Pick hops for a new path: choose a random terminal router that is neither
// already a path endpoint of ours nor profiled as bad, then align hops to it.
std::optional<std::vector<RouterContact>>
Endpoint::GetHopsForBuild()
{
  std::unordered_set<RouterID> exclude;
  ForEachPath([&exclude](auto path) { exclude.insert(path->Endpoint()); });

  const auto maybe =
      router()->node_db()->GetRandom([exclude, r = router()](const auto& rc) -> bool {
        return exclude.count(rc.pubkey) == 0
            and not r->router_profiling().IsBadForPath(rc.pubkey);
      });

  if (maybe.has_value())
    return GetHopsForBuildWithEndpoint(maybe->router_id());
  return std::nullopt;
}
2023-10-16 16:55:51 +00:00
// Pick hops for a path terminating at `endpoint`, honoring the snode
// blacklist; delegates to the path builder.
std::optional<std::vector<RouterContact>>
Endpoint::GetHopsForBuildWithEndpoint(RouterID endpoint)
{
  return path::Builder::GetHopsAlignedToForBuild(endpoint, SnodeBlacklist());
}
2023-10-16 16:55:51 +00:00
// Cap on concurrent OutboundContexts per remote address.
constexpr auto MaxOutboundContextPerRemote = 1;
2023-10-16 16:55:51 +00:00
// Create an OutboundContext for the remote described by `introset`, unless we
// already hold MaxOutboundContextPerRemote contexts for that address.
void
Endpoint::PutNewOutboundContext(const service::IntroSet& introset, llarp_time_t)
{
  const Address addr{introset.address_keys.Addr()};
  auto& remoteSessions = _state->remote_sessions;

  const bool below_cap = remoteSessions.count(addr) < MaxOutboundContextPerRemote;
  if (below_cap)
  {
    remoteSessions.emplace(addr, std::make_shared<OutboundContext>(introset, this));
    LogInfo("Created New outbound context for ", addr.ToString());
  }

  // visit each context for this remote; ready-hook wiring is still TODO
  auto sessionRange = remoteSessions.equal_range(addr);
  for (auto itr = sessionRange.first; itr != sessionRange.second; ++itr)
  {
    // TODO:
    // itr->second->AddReadyHook(
    //   [addr, this](auto session) { InformPathToService(addr, session); }, left);
  }
}
2019-05-10 16:19:33 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::HasExit() const
{
for (const auto& [name, info] : _startup_ons_mappings)
{
2023-10-16 16:55:51 +00:00
if (info.first.has_value())
return true;
}
2023-10-16 16:55:51 +00:00
return not _exit_map.Empty();
}
2018-07-22 23:14:29 +00:00
2023-10-16 16:55:51 +00:00
auto
Endpoint::GetUniqueEndpointsForLookup() const
{
path::UniqueEndpointSet_t paths;
2018-07-22 23:14:29 +00:00
2023-10-16 16:55:51 +00:00
ForEachPath([&paths](auto path) {
if (path and path->IsReady())
paths.insert(path);
});
2018-07-22 23:14:29 +00:00
2023-10-16 16:55:51 +00:00
return paths;
}
2023-10-16 16:55:51 +00:00
// Network-usable means our introset is valid (is_ready) AND we have enough
// unique lookup paths to resolve names.
bool
Endpoint::ReadyForNetwork() const
{
  return is_ready() and ReadyToDoLookup(GetUniqueEndpointsForLookup().size());
}
2023-10-16 16:55:51 +00:00
// True when `num_paths` unique-endpoint paths are enough to perform an ONS
// lookup.
bool
Endpoint::ReadyToDoLookup(size_t num_paths) const
{
  // Currently just checks the number of paths, but could do more checks in the future.
  return num_paths >= MIN_ONS_LOOKUP_ENDPOINTS;
}
2023-10-16 16:55:51 +00:00
void
Endpoint::lookup_name(std::string name, std::function<void(oxen::quic::message)> func)
{
// TODO: so fuck all this?
// if (not is_valid_name(name))
// {
// handler(parse_address(name));
// return;
// }
// auto& cache = _state->nameCache;
// const auto maybe = cache.Get(name);
// if (maybe.has_value())
// {
// handler(maybe);
// return;
// }
log::info(link_cat, "{} looking up ONS name {}", Name(), name);
auto paths = GetUniqueEndpointsForLookup();
// // not enough paths
// if (not ReadyToDoLookup(paths.size()))
// {
// LogWarn(
// Name(),
// " not enough paths for lns lookup, have ",
// paths.size(),
// " need ",
// MIN_ONS_LOOKUP_ENDPOINTS);
// handler(std::nullopt);
// return;
// }
// pick up to max_unique_lns_endpoints random paths to do lookups from
std::vector<path::Path_ptr> chosenpaths;
chosenpaths.insert(chosenpaths.begin(), paths.begin(), paths.end());
std::shuffle(chosenpaths.begin(), chosenpaths.end(), llarp::csrng);
2023-10-16 16:55:51 +00:00
chosenpaths.resize(std::min(paths.size(), MAX_ONS_LOOKUP_ENDPOINTS));
for (const auto& path : chosenpaths)
{
log::info(link_cat, "{} lookup {} from {}", Name(), name, path->Endpoint());
path->find_name(name, func);
}
}
void
Endpoint::EnsureRouterIsKnown(const RouterID& rid)
{
if (rid.IsZero())
return;
if (!router()->node_db()->has_router(rid))
{
2023-10-16 16:55:51 +00:00
lookup_router(rid);
}
2023-10-16 16:55:51 +00:00
}
2023-10-16 16:55:51 +00:00
// Start a router lookup for `rid` over our closest established path.
// Returns true when a request was sent; false when a lookup is already
// pending or no established path is available.
//
// BUGFIX: previously the path returned by GetEstablishedPathClosestTo() was
// dereferenced unconditionally, crashing when no path was established.
bool
Endpoint::lookup_router(RouterID rid, std::function<void(oxen::quic::message)> func)
{
  const auto& routers = _state->pending_routers;

  if (routers.find(rid) != routers.end())
    return false;  // lookup already in flight

  auto path = GetEstablishedPathClosestTo(rid);
  if (not path)
    return false;  // nothing to send the request over

  // NOTE(review): the first argument looks like it should carry the router id
  // being looked up, not the literal "find_router" — confirm find_router()'s
  // signature before changing.
  path->find_router("find_router", func);
  return true;
}
2023-10-16 16:55:51 +00:00
// Called when a path finishes building; the per-path handlers below were
// disabled in the libquic refactor, so this just forwards to the builder.
void
Endpoint::HandlePathBuilt(path::Path_ptr p)
{
  // p->SetDataHandler(util::memFn(&Endpoint::HandleHiddenServiceFrame, this));
  // p->SetDropHandler(util::memFn(&Endpoint::HandleDataDrop, this));
  // p->SetDeadChecker(util::memFn(&Endpoint::CheckPathIsDead, this));
  path::Builder::HandlePathBuilt(p);
}
// Log a dropped data message on path `p` (sequence `seq`, destination `dst`);
// always returns true (the drop is tolerated).
bool
Endpoint::HandleDataDrop(path::Path_ptr p, const PathID_t& dst, uint64_t seq)
{
  LogWarn(Name(), " message ", seq, " dropped by endpoint ", p->Endpoint(), " via ", dst);
  return true;
}
2023-10-16 16:55:51 +00:00
// Environment variables exported to on-up/on-down notification hooks.
std::unordered_map<std::string, std::string>
Endpoint::NotifyParams() const
{
  return {{"LOKINET_ADDR", _identity.pub.Addr().ToString()}};
}
2023-10-16 16:55:51 +00:00
// Drain the inbound event queue, dispatching each queued protocol message.
void
Endpoint::FlushRecvData()
{
  while (true)
  {
    auto maybe = _recv_event_queue.tryPopFront();
    if (not maybe)
      break;
    ProtocolMessage::ProcessAsync(maybe->fromPath, maybe->pathid, maybe->msg);
  }
}
2023-10-16 16:55:51 +00:00
// Enqueue an inbound event (best-effort: tryPushBack drops on a full queue)
// and wake the router so FlushRecvData runs.
void
Endpoint::QueueRecvData(RecvDataEvent ev)
{
  _recv_event_queue.tryPushBack(std::move(ev));
  router()->TriggerPump();
}
// Handle a decrypted data message arriving on path `p` (inbound path id
// `from`): record the sender, pick the reply intro, update RX stats, and
// forward to ProcessDataMessage.
bool
Endpoint::HandleDataMessage(
    path::Path_ptr p, const PathID_t from, std::shared_ptr<ProtocolMessage> msg)
{
  PutSenderFor(msg->tag, msg->sender, true);

  // for inbound convos, reply along the path this message arrived on
  Introduction intro = msg->introReply;
  if (HasInboundConvo(msg->sender.Addr()))
  {
    intro.path_id = from;
    intro.router = p->Endpoint();
  }
  PutReplyIntroFor(msg->tag, intro);

  ConvoTagRX(msg->tag);
  return ProcessDataMessage(msg);
}
2018-08-10 21:34:11 +00:00
2023-10-16 16:55:51 +00:00
// True if at least one snode session to `ident` is ready for traffic.
bool
Endpoint::HasPathToSNode(const RouterID ident) const
{
  const auto range = _state->snode_sessions.equal_range(ident);
  for (auto itr = range.first; itr != range.second; ++itr)
  {
    if (itr->second->IsReady())
      return true;
  }
  return false;
}
2018-08-10 21:34:11 +00:00
2023-10-16 16:55:51 +00:00
// Our own .loki address, derived from the identity keys.
EndpointBase::AddressVariant_t
Endpoint::LocalAddress() const
{
  return _identity.pub.Addr();
}
2023-10-16 16:55:51 +00:00
// Per-remote send statistics; not implemented yet, always nullopt.
std::optional<EndpointBase::SendStat>
Endpoint::GetStatFor(AddressVariant_t) const
{
  // TODO: implement me
  return std::nullopt;
}
2023-10-16 16:55:51 +00:00
std::unordered_set<EndpointBase::AddressVariant_t>
Endpoint::AllRemoteEndpoints() const
{
std::unordered_set<AddressVariant_t> remote;
for (const auto& item : Sessions())
{
2023-10-16 16:55:51 +00:00
remote.insert(item.second.remote.Addr());
}
2023-10-16 16:55:51 +00:00
for (const auto& item : _state->snode_sessions)
2019-11-28 23:08:02 +00:00
{
2023-10-16 16:55:51 +00:00
remote.insert(item.first);
2019-11-28 23:08:02 +00:00
}
2023-10-16 16:55:51 +00:00
return remote;
}
2019-11-28 23:08:02 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::ProcessDataMessage(std::shared_ptr<ProtocolMessage> msg)
{
if ((msg->proto == ProtocolType::Exit
&& (_state->is_exit_enabled || _exit_map.ContainsValue(msg->sender.Addr())))
|| msg->proto == ProtocolType::TrafficV4 || msg->proto == ProtocolType::TrafficV6
|| (msg->proto == ProtocolType::QUIC and _tunnel_manager))
2019-11-28 23:08:02 +00:00
{
2023-10-16 16:55:51 +00:00
_inbound_queue.tryPushBack(std::move(msg));
router()->TriggerPump();
2023-10-16 16:55:51 +00:00
return true;
2019-11-28 23:08:02 +00:00
}
2023-10-16 16:55:51 +00:00
if (msg->proto == ProtocolType::Control)
{
2023-10-16 16:55:51 +00:00
// TODO: implement me (?)
// right now it's just random noise
return true;
}
2023-10-16 16:55:51 +00:00
return false;
}
2023-10-16 16:55:51 +00:00
void
Endpoint::AsyncProcessAuthMessage(
std::shared_ptr<ProtocolMessage> msg, std::function<void(std::string, bool)> hook)
2023-10-16 16:55:51 +00:00
{
if (_auth_policy)
2018-11-29 14:01:13 +00:00
{
2023-10-16 16:55:51 +00:00
if (not _auth_policy->AsyncAuthPending(msg->tag))
{
2023-10-16 16:55:51 +00:00
// do 1 authentication attempt and drop everything else
_auth_policy->AuthenticateAsync(std::move(msg), std::move(hook));
2018-11-29 14:01:13 +00:00
}
}
2023-10-16 16:55:51 +00:00
else
{
router()->loop()->call([h = std::move(hook)] { h("OK", true); });
}
2023-10-16 16:55:51 +00:00
}
2023-10-16 16:55:51 +00:00
  /// Build (and eventually queue) a signed protocol frame informing the remote
  /// whether its auth attempt succeeded.  On success the frame carries an
  /// encrypted ProtocolMessage with the result string; on failure only a
  /// signed frame with the failure flag set.  Sending itself is still TODO.
  void
  Endpoint::SendAuthResult(
      path::Path_ptr path, PathID_t /* replyPath */, ConvoTag tag, std::string result, bool success)
  {
    // not applicable because we are not an exit or don't have an endpoint auth policy
    // NOTE(review): this guard only proceeds when we are an exit AND have an
    // auth policy, which makes the `else` branch on `_auth_policy` below
    // unreachable; the comment wording suggests `or` may have been meant to
    // be `and` -- confirm against upstream intent before changing.
    if ((not _state->is_exit_enabled) or _auth_policy == nullptr)
      return;

    ProtocolFrameMessage f{};
    // flag != 0 signals rejection to the remote
    f.flag = int(not success);
    f.convo_tag = tag;
    f.path_id = path->intro.path_id;
    f.nonce.Randomize();
    if (success)
    {
      ProtocolMessage msg;
      msg.put_buffer(result);
      // auth-policy replies use the Auth protocol, otherwise plain Control
      if (_auth_policy)
        msg.proto = ProtocolType::Auth;
      else
        msg.proto = ProtocolType::Control;
      // we need the remote's reply intro and our cached session key to be
      // able to encrypt the response back to them
      if (not GetReplyIntroFor(tag, msg.introReply))
      {
        LogError("Failed to send auth reply: no reply intro");
        return;
      }
      msg.sender = _identity.pub;
      SharedSecret sessionKey{};
      if (not GetCachedSessionKeyFor(tag, sessionKey))
      {
        LogError("failed to send auth reply: no cached session key");
        return;
      }
      if (not f.EncryptAndSign(msg, sessionKey, _identity))
      {
        LogError("Failed to encrypt and sign auth reply");
        return;
      }
    }
    else
    {
      // rejection carries no payload; just sign the bare frame
      if (not f.Sign(_identity))
      {
        LogError("failed to sign auth reply result");
        return;
      }
    }
    // TODO:
    // _send_queue.tryPushBack(
    //     SendEvent{std::make_shared<routing::PathTransferMessage>(f, replyPath), path});
  }
2020-05-28 11:07:32 +00:00
2023-10-16 16:55:51 +00:00
  /// Forget all local session state associated with this convo tag.
  void
  Endpoint::RemoveConvoTag(const ConvoTag& t)
  {
    Sessions().erase(t);
  }
2019-03-08 16:00:45 +00:00
2023-10-16 16:55:51 +00:00
void
Endpoint::ResetConvoTag(ConvoTag tag, path::Path_ptr p, PathID_t /* from */)
2023-10-16 16:55:51 +00:00
{
// send reset convo tag message
ProtocolFrameMessage f{};
f.flag = 1;
f.convo_tag = tag;
f.path_id = p->intro.path_id;
f.Sign(_identity);
{
LogWarn("invalidating convotag T=", tag);
RemoveConvoTag(tag);
// TODO:
// _send_queue.tryPushBack(
// SendEvent{std::make_shared<routing::PathTransferMessage>(f, from), p});
}
2023-10-16 16:55:51 +00:00
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::HandleHiddenServiceFrame(path::Path_ptr p, const ProtocolFrameMessage& frame)
{
if (frame.flag)
{
2023-10-16 16:55:51 +00:00
// handle discard
ServiceInfo si;
if (!GetSenderFor(frame.convo_tag, si))
return false;
// verify source
if (!frame.Verify(si))
return false;
// remove convotag it doesn't exist
LogWarn("remove convotag T=", frame.convo_tag, " R=", frame.flag, " from ", si.Addr());
RemoveConvoTag(frame.convo_tag);
return true;
}
2023-10-16 16:55:51 +00:00
if (not frame.AsyncDecryptAndVerify(router()->loop(), p, _identity, this))
2018-09-17 15:32:37 +00:00
{
2023-10-16 16:55:51 +00:00
ResetConvoTag(frame.convo_tag, p, frame.path_id);
2019-03-30 13:02:10 +00:00
}
2023-10-16 16:55:51 +00:00
return true;
}
2019-03-30 13:02:10 +00:00
2023-10-16 16:55:51 +00:00
  /// Called when one of our paths dies: penalize it in the profiler, kick off
  /// a replacement build, run the base-class bookkeeping, then republish our
  /// introset since the set of usable intro paths changed.
  void
  Endpoint::HandlePathDied(path::Path_ptr p)
  {
    router()->router_profiling().MarkPathTimeout(p.get());
    ManualRebuild(1);
    path::Builder::HandlePathDied(p);
    regen_and_publish_introset();
  }
  /// A path is considered dead once the time since its last activity (dlt)
  /// exceeds the liveness timeout.
  bool
  Endpoint::CheckPathIsDead(path::Path_ptr, llarp_time_t dlt)
  {
    return dlt > path::ALIVE_TIMEOUT;
  }
2023-10-16 16:55:51 +00:00
  /// Remember that we initiated the session towards this address, so we are
  /// willing to (re)build outbound paths to it later.
  void
  Endpoint::MarkAddressOutbound(service::Address addr)
  {
    _state->m_OutboundSessions.insert(addr);
  }
2020-02-18 16:00:45 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::WantsOutboundSession(const Address& addr) const
{
return _state->m_OutboundSessions.count(addr) > 0;
}
2020-02-18 16:00:45 +00:00
2023-10-16 16:55:51 +00:00
void
Endpoint::InformPathToService(const Address remote, OutboundContext* ctx)
{
auto& serviceLookups = _state->pending_service_lookups;
auto range = serviceLookups.equal_range(remote);
auto itr = range.first;
while (itr != range.second)
{
itr->second(remote, ctx);
++itr;
}
serviceLookups.erase(remote);
}
bool
Endpoint::EnsurePathTo(
std::variant<Address, RouterID> addr,
std::function<void(std::optional<ConvoTag>)> hook,
llarp_time_t timeout)
{
if (auto ptr = std::get_if<Address>(&addr))
{
2023-10-16 16:55:51 +00:00
if (*ptr == _identity.pub.Addr())
{
2023-10-16 16:55:51 +00:00
ConvoTag tag{};
if (auto maybe = GetBestConvoTagFor(*ptr))
tag = *maybe;
else
tag.Randomize();
PutSenderFor(tag, _identity.pub, true);
ConvoTagTX(tag);
Sessions()[tag].forever = true;
Loop()->call_soon([tag, hook]() { hook(tag); });
return true;
}
if (not WantsOutboundSession(*ptr))
{
// we don't want to connect back to inbound sessions
hook(std::nullopt);
return true;
}
2023-10-16 16:55:51 +00:00
return EnsurePathToService(
*ptr,
[hook](auto, auto* ctx) -> bool {
2023-10-16 16:55:51 +00:00
if (ctx)
{
hook(ctx->get_current_tag());
return true;
2023-10-16 16:55:51 +00:00
}
hook(std::nullopt);
return false;
2023-10-16 16:55:51 +00:00
},
timeout);
}
if (auto ptr = std::get_if<RouterID>(&addr))
{
2023-10-16 16:55:51 +00:00
return EnsurePathToSNode(*ptr, [hook](auto, auto session, auto tag) {
if (session)
{
2023-10-16 16:55:51 +00:00
hook(tag);
}
2023-10-16 16:55:51 +00:00
else
{
hook(std::nullopt);
}
2023-10-16 16:55:51 +00:00
});
}
return false;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::EnsurePathToSNode(
const RouterID snode,
std::function<void(const RouterID, exit::BaseSession_ptr, ConvoTag)> hook)
{
auto& nodeSessions = _state->snode_sessions;
using namespace std::placeholders;
if (nodeSessions.count(snode) == 0)
{
const auto src = xhtonl(net::TruncateV6(GetIfAddr()));
const auto dst = xhtonl(net::TruncateV6(ObtainIPForAddr(snode)));
auto session = std::make_shared<exit::SNodeSession>(
snode,
[=](const llarp_buffer_t& buf) -> bool {
net::IPPacket pkt;
if (not pkt.Load(buf))
return false;
pkt.UpdateIPv4Address(src, dst);
/// TODO: V6
auto itr = _state->snode_sessions.find(snode);
if (itr == _state->snode_sessions.end())
return false;
if (const auto maybe = itr->second->CurrentPath())
return HandleInboundPacket(
ConvoTag{maybe->as_array()}, pkt.ConstBuffer(), ProtocolType::TrafficV4, 0);
return false;
},
router(),
1,
numHops,
false,
this);
_state->snode_sessions[snode] = session;
}
EnsureRouterIsKnown(snode);
auto range = nodeSessions.equal_range(snode);
auto itr = range.first;
while (itr != range.second)
{
if (itr->second->IsReady())
hook(snode, itr->second, ConvoTag{itr->second->CurrentPath()->as_array()});
else
{
2023-10-16 16:55:51 +00:00
itr->second->AddReadyHook([hook, snode](auto session) {
if (session)
{
2023-10-16 16:55:51 +00:00
hook(snode, session, ConvoTag{session->CurrentPath()->as_array()});
}
else
{
2023-10-16 16:55:51 +00:00
hook(snode, nullptr, ConvoTag{});
}
});
2023-10-16 16:55:51 +00:00
if (not itr->second->BuildCooldownHit(Now()))
itr->second->BuildOne();
}
2023-10-16 16:55:51 +00:00
++itr;
}
2023-10-16 16:55:51 +00:00
return true;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::EnsurePathToService(
const Address remote,
std::function<void(Address, OutboundContext*)> hook,
[[maybe_unused]] llarp_time_t timeout)
{
if (not WantsOutboundSession(remote))
{
2023-10-16 16:55:51 +00:00
// we don't want to ensure paths to addresses that are inbound
// inform fail right away in that case
hook(remote, nullptr);
return false;
}
2023-10-16 16:55:51 +00:00
/// how many routers to use for lookups
static constexpr size_t NumParallelLookups = 2;
// add response hook to list for address.
_state->pending_service_lookups.emplace(remote, hook);
auto& sessions = _state->remote_sessions;
{
auto range = sessions.equal_range(remote);
auto itr = range.first;
while (itr != range.second)
{
2023-10-16 16:55:51 +00:00
if (itr->second->ReadyToSend())
{
2023-10-16 16:55:51 +00:00
InformPathToService(remote, itr->second.get());
return true;
}
++itr;
}
}
2023-10-16 16:55:51 +00:00
/// check replay filter
if (not _introset_lookup_filter.Insert(remote))
return true;
2023-10-16 16:55:51 +00:00
const auto paths = GetManyPathsWithUniqueEndpoints(this, NumParallelLookups);
2023-10-16 16:55:51 +00:00
const dht::Key_t location = remote.ToKey();
2023-10-16 16:55:51 +00:00
// flag to only add callback to list of callbacks for
// address once.
bool hookAdded = false;
2020-02-18 16:00:45 +00:00
2023-10-16 16:55:51 +00:00
for (const auto& path : paths)
{
path->find_intro(location, false, 0, [this, hook](oxen::quic::message m) mutable {
if (m)
{
2023-10-16 16:55:51 +00:00
std::string introset;
try
{
2023-10-16 16:55:51 +00:00
oxenc::bt_dict_consumer btdc{m.body()};
introset = btdc.require<std::string>("INTROSET");
}
2023-10-16 16:55:51 +00:00
catch (...)
2020-03-02 16:12:29 +00:00
{
2023-10-16 16:55:51 +00:00
log::warning(link_cat, "Failed to parse find name response!");
throw;
2020-03-02 16:12:29 +00:00
}
2018-07-19 04:58:39 +00:00
2023-10-16 16:55:51 +00:00
service::EncryptedIntroSet enc{introset};
router()->contacts()->services()->PutNode(std::move(enc));
2023-10-16 16:55:51 +00:00
// TODO: finish this
}
});
}
2023-10-16 16:55:51 +00:00
return hookAdded;
}
2023-10-16 16:55:51 +00:00
void
Endpoint::SRVRecordsChanged()
{
auto& introset = intro_set();
introset.SRVs.clear();
for (const auto& srv : SRVRecords())
introset.SRVs.emplace_back(srv.toTuple());
2023-10-16 16:55:51 +00:00
regen_and_publish_introset();
}
2023-10-12 20:37:45 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::send_to(ConvoTag tag, std::string payload)
{
if (tag.IsZero())
{
log::warning(link_cat, "SendToOrQueue failed: convo tag is zero");
return false;
2021-03-12 17:41:48 +00:00
}
2023-10-16 16:55:51 +00:00
log::debug(link_cat, "{} sending {} bytes (Tag: {})", Name(), payload.size(), tag);
2023-10-16 16:55:51 +00:00
if (auto maybe = GetEndpointWithConvoTag(tag))
{
if (auto rid = std::get_if<RouterID>(&*maybe))
{
2023-10-16 16:55:51 +00:00
return router()->send_data_message(*rid, payload);
}
2023-10-16 16:55:51 +00:00
if (auto saddr = std::get_if<Address>(&*maybe))
{
2023-10-16 16:55:51 +00:00
return router()->send_data_message(saddr->ToRouter(), payload);
}
2023-10-16 16:55:51 +00:00
}
2023-10-16 16:55:51 +00:00
log::debug(link_cat, "SendToOrQueue failed: no endpoint for convo tag {}", tag);
return false;
}
void
Endpoint::Pump(llarp_time_t)
2023-10-16 16:55:51 +00:00
{
FlushRecvData();
// send downstream packets to user for snode
for (const auto& [router, session] : _state->snode_sessions)
session->FlushDownstream();
// handle inbound traffic sorted
util::ascending_priority_queue<ProtocolMessage> queue;
2023-10-16 16:55:51 +00:00
while (not _inbound_queue.empty())
{
// succ it out
queue.emplace(std::move(*_inbound_queue.popFront()));
}
2023-10-16 16:55:51 +00:00
while (not queue.empty())
{
const auto& msg = queue.top();
log::debug(
logcat,
"{} handling inbound packet (size {}B) on tag {}",
2023-10-16 16:55:51 +00:00
Name(),
msg.tag,
msg.payload.size());
// if (HandleInboundPacket(msg.tag, msg.payload, msg.proto, msg.seqno))
// {
// ConvoTagRX(msg.tag);
// }
// else
// {
// LogWarn("Failed to handle inbound message");
// }
2023-10-16 16:55:51 +00:00
queue.pop();
}
2023-10-16 16:55:51 +00:00
auto r = router();
2023-10-16 16:55:51 +00:00
// TODO: locking on this container
// for (const auto& [addr, outctx] : _state->remote_sessions)
// {
// outctx->FlushUpstream();
// outctx->Pump(now);
// }
// // TODO: locking on this container
// for (const auto& [r, session] : _state->snode_sessions)
// session->FlushUpstream();
2023-10-16 16:55:51 +00:00
// // send queue flush
// while (not _send_queue.empty())
// {
// SendEvent item = _send_queue.popFront();
// item.first->sequence_number = item.second->NextSeqNo();
// if (item.second->SendRoutingMessage(*item.first, r))
// ConvoTagTX(item.first->protocol_frame_msg.convo_tag);
// }
2023-10-16 16:55:51 +00:00
UpstreamFlush(r);
}
2019-04-25 17:15:56 +00:00
2023-10-16 16:55:51 +00:00
  /// Pick the convo tag with the lowest estimated RTT for the given remote.
  /// For a service Address this scans all sessions; for a RouterID it uses
  /// the snode session's current path.  Returns nullopt when nothing usable
  /// is found.
  std::optional<ConvoTag>
  Endpoint::GetBestConvoTagFor(std::variant<Address, RouterID> remote) const
  {
    // get convotag with lowest estimated RTT
    if (auto ptr = std::get_if<Address>(&remote))
    {
      // anything slower than this baseline is not considered an improvement
      llarp_time_t rtt = 30s;
      std::optional<ConvoTag> ret = std::nullopt;
      for (const auto& [tag, session] : Sessions())
      {
        if (tag.IsZero())
          continue;
        if (session.remote.Addr() == *ptr)
        {
          // loopback: any matching tag is immediately good enough
          if (*ptr == _identity.pub.Addr())
          {
            return tag;
          }
          if (session.inbound)
          {
            auto path = GetPathByRouter(session.replyIntro.router);
            // if we have no path to the remote router that's fine still use it just in case this
            // is the ONLY one we have
            if (path == nullptr)
            {
              ret = tag;
              continue;
            }
            if (path and path->IsReady())
            {
              // round-trip covers both the reply intro hop and our own path
              const auto rttEstimate = (session.replyIntro.latency + path->intro.latency) * 2;
              if (rttEstimate < rtt)
              {
                ret = tag;
                rtt = rttEstimate;
              }
            }
          }
          else
          {
            // outbound sessions: RTT-based selection is pending the
            // estimatedRTT refactor below, so this loop currently selects
            // nothing for outbound convos.
            auto range = _state->remote_sessions.equal_range(*ptr);
            auto itr = range.first;
            while (itr != range.second)
            {
              // TODO:
              // if (itr->second->ReadyToSend() and itr->second->estimatedRTT > 0s)
              // {
              //   if (itr->second->estimatedRTT < rtt)
              //   {
              //     ret = tag;
              //     rtt = itr->second->estimatedRTT;
              //   }
              // }
              itr++;
            }
          }
        }
      }
      return ret;
    }
    if (auto* ptr = std::get_if<RouterID>(&remote))
    {
      // snode: the convo tag is derived from the session's current path id
      auto itr = _state->snode_sessions.find(*ptr);
      if (itr == _state->snode_sessions.end())
        return std::nullopt;
      if (auto maybe = itr->second->CurrentPath())
        return ConvoTag{maybe->as_array()};
    }
    return std::nullopt;
  }
2018-08-09 19:02:17 +00:00
2023-10-16 16:55:51 +00:00
bool
Endpoint::HasConvoTag(const ConvoTag& t) const
{
return Sessions().find(t) != Sessions().end();
}
2023-10-16 16:55:51 +00:00
std::optional<uint64_t>
Endpoint::GetSeqNoForConvo(const ConvoTag& tag)
{
auto itr = Sessions().find(tag);
if (itr == Sessions().end())
return std::nullopt;
return itr->second.seqno++;
}
2023-10-16 16:55:51 +00:00
bool
Endpoint::ShouldBuildMore(llarp_time_t now) const
{
if (BuildCooldownHit(now))
return false;
const auto requiredPaths = std::max(numDesiredPaths, path::MIN_INTRO_PATHS);
if (NumInStatus(path::ePathBuilding) >= requiredPaths)
return false;
return NumPathsExistingAt(now + (path::DEFAULT_LIFETIME - path::INTRO_PATH_SPREAD))
< requiredPaths;
}
Replace libuv with uvw & related refactoring - removes all the llarp_ev_* functions, replacing with methods/classes/functions in the llarp namespace. - banish ev/ev.h to the void - Passes various things by const lvalue ref, especially shared_ptr's that don't need to be copied (to avoid an atomic refcount increment/decrement). - Add a llarp::UDPHandle abstract class for UDP handling - Removes the UDP tick handler; code that needs tick can just do a separate handler on the event loop outside the UDP socket. - Adds an "OwnedBuffer" which owns its own memory but is implicitly convertible to a llarp_buffer_t. This is mostly needed to take over ownership of buffers from uvw without copying them as, currently, uvw does its own allocation (pending some open upstream issues/PRs). - Logic: - add `make_caller`/`call_forever`/`call_every` utility functions to abstract Call wrapping and dependent timed tasks. - Add inLogicThread() so that code can tell its inside the logic thread (typically for debugging assertions). - get rid of janky integer returns and dealing with cancellations on call_later: the other methods added here and the event loop code remove the need for them. - Event loop: - redo everything with uvw instead of libuv - rename EventLoopWakeup::Wakeup to EventLoopWakeup::Trigger to better reflect what it does. - add EventLoopRepeater for repeated events, and replace the code that reschedules itself every time it is called with a repeater. - Split up `EventLoop::run()` into a non-virtual base method and abstract `run_loop()` methods; the base method does a couple extra setup/teardown things that don't need to be in the derived class. - udp_listen is replaced with ev->udp(...) which returns a new UDPHandle object rather that needing gross C-style-but-not-actually-C-compatible structs. 
- Remove unused register_poll_fd_(un)readable - Use shared_ptr for EventLoopWakeup rather than returning a raw pointer; uvw lets us not have to worry about having the event loop class maintain ownership of it. - Add factory EventLoop::create() function to create a default (uvw-based) event loop (previously this was one of the llarp_ev_blahblah unnamespaced functions). - ev_libuv: this is mostly rewritten; all of the glue code/structs, in particular, are gone as they are no longer needed with uvw. - DNS: - Rename DnsHandler to DnsInterceptor to better describe what it does (this is the code that intercepts all DNS to the tun IP range for Android). - endpoint: - remove unused "isolated network" code - remove distinct (but actually always the same) variables for router/endpoint logic objects - llarp_buffer_t - make constructors type-safe against being called with points to non-size-1 values - tun packet reading: - read all available packets off the device/file descriptor; previously we were reading one packet at a time then returning to the event loop to poll again. - ReadNextPacket() now returns a 0-size packet if the read would block (so that we can implement the previous point). - ReadNextPacket() now throws on I/O error - Miscellaneous code cleanups/simplifications
2021-03-02 02:06:20 +00:00
2023-10-16 16:55:51 +00:00
  /// Non-owning pointer to the Router this endpoint belongs to.
  Router*
  Endpoint::router()
  {
    return _state->router;
  }
2020-05-21 14:18:23 +00:00
2023-10-16 16:55:51 +00:00
  /// The router's event loop; all endpoint work is scheduled on it.
  const EventLoop_ptr&
  Endpoint::Loop()
  {
    return router()->loop();
  }
2023-10-16 16:55:51 +00:00
  /// Exclude this snode from future path/session selection.
  void
  Endpoint::BlacklistSNode(const RouterID snode)
  {
    _state->snode_blacklist.insert(snode);
  }
2023-10-16 16:55:51 +00:00
  /// The set of snodes we refuse to use (see BlacklistSNode).
  const std::set<RouterID>&
  Endpoint::SnodeBlacklist() const
  {
    return _state->snode_blacklist;
  }
2023-10-16 16:55:51 +00:00
  /// Read-only view of the introset we publish for this endpoint.
  const IntroSet&
  Endpoint::intro_set() const
  {
    return _state->local_introset;
  }
2023-10-16 16:55:51 +00:00
  /// Mutable access to our local introset (callers must republish after
  /// changing it).
  IntroSet&
  Endpoint::intro_set()
  {
    return _state->local_introset;
  }
2023-10-16 16:55:51 +00:00
  /// Read-only view of the convo-tag -> session table.
  const std::unordered_map<ConvoTag, Session>&
  Endpoint::Sessions() const
  {
    return _state->m_Sessions;
  }
2023-10-16 16:55:51 +00:00
  /// Mutable access to the convo-tag -> session table.
  std::unordered_map<ConvoTag, Session>&
  Endpoint::Sessions()
  {
    return _state->m_Sessions;
  }
2023-10-16 16:55:51 +00:00
void
Endpoint::SetAuthInfoForEndpoint(Address addr, AuthInfo info)
{
if (info.token.empty())
{
2023-10-16 16:55:51 +00:00
_remote_auth_infos.erase(addr);
return;
}
2023-10-16 16:55:51 +00:00
_remote_auth_infos[addr] = std::move(info);
}
2023-10-16 16:55:51 +00:00
  /// Route the given IP range through `exit`; a zero exit address installs
  /// the mapping silently (no log line).
  void
  Endpoint::MapExitRange(IPRange range, Address exit)
  {
    if (not exit.IsZero())
      LogInfo(Name(), " map ", range, " to exit at ", exit);
    _exit_map.Insert(range, exit);
  }
bool
Endpoint::HasFlowToService(Address addr) const
{
return HasOutboundConvo(addr) or HasInboundConvo(addr);
}
void
Endpoint::UnmapExitRange(IPRange range)
{
// unmap all ranges that fit in the range we gave
_exit_map.RemoveIf([&](const auto& item) -> bool {
if (not range.Contains(item.first))
return false;
2023-10-16 16:55:51 +00:00
LogInfo(Name(), " unmap ", item.first, " exit range mapping");
return true;
});
2023-10-16 16:55:51 +00:00
if (_exit_map.Empty())
router()->route_poker()->put_down();
}
2023-10-16 16:55:51 +00:00
void
Endpoint::UnmapRangeByExit(IPRange range, std::string exit)
{
// unmap all ranges that match the given exit when hot swapping
_exit_map.RemoveIf([&](const auto& item) -> bool {
if ((range.Contains(item.first)) and (item.second.ToString() == exit))
{
log::info(logcat, "{} unmap {} range mapping to exit node {}", Name(), item.first, exit);
return true;
}
return false;
});
2023-10-16 16:55:51 +00:00
if (_exit_map.Empty())
router()->route_poker()->put_down();
}
std::optional<AuthInfo>
Endpoint::MaybeGetAuthInfoForEndpoint(Address remote)
{
const auto itr = _remote_auth_infos.find(remote);
2023-10-16 16:55:51 +00:00
if (itr == _remote_auth_infos.end())
return std::nullopt;
2023-10-16 16:55:51 +00:00
return itr->second;
}
  /// Non-owning pointer to the QUIC tunnel manager; nullptr when QUIC
  /// tunneling has not been initialized for this endpoint.
  link::TunnelManager*
  Endpoint::GetQUICTunnel()
  {
    return _tunnel_manager.get();
  }
QUIC lokinet integration refactor Refactors how quic packets get handled: the actual tunnels now live in tunnel.hpp's TunnelManager which holds and manages all the quic<->tcp tunnelling. service::Endpoint now holds a TunnelManager rather than a quic::Server. We only need one quic server, but we need a separate quic client instance per outgoing quic tunnel, and TunnelManager handles all that glue now. Adds QUIC packet handling to get to the right tunnel code. This required multiplexing incoming quic packets, as follows: Adds a very small quic tunnel packet header of 4 bytes: [1, SPORT, ECN] for client->server packets, where SPORT is our source "port" (really: just a uint16_t unique quic instance identifier) or [2, DPORT, ECN] for server->client packets where the DPORT is the SPORT from above. (This also reworks ECN bits to get properly carried over lokinet.) We don't need a destination/source port for the server-side because there is only ever one quic server (and we know we're going to it when the first byte of the header is 1). Removes the config option for quic exposing ports; a full lokinet will simply accept anything incoming on quic and tunnel it to the requested port on the the local endpoint IP (this handler will come in a following commit). Replace ConvoTags with full addresses: we need to carry the port, as well, which the ConvoTag can't give us, so change those to more general SockAddrs from which we can extract both the ConvoTag *and* the port. Add a pending connection queue along with new quic-side handlers to call when a stream becomes available (TunnelManager uses this to wire up pending incoming conns with quic streams as streams open up). Completely get rid of tunnel_server/tunnel_client.cpp code; it is now moved to tunnel.hpp. Add listen()/forget() methods in TunnelManager for setting up quic listening sockets (for liblokinet usage). Add open()/close() methods in TunnelManager for spinning up new quic clients for outgoing quic connections.
2021-03-23 19:26:32 +00:00
2023-10-16 16:55:51 +00:00
} // namespace llarp::service