#include <chrono>
#include <memory>

#include "endpoint.hpp"

#include <llarp/dht/context.hpp>
#include <llarp/dht/key.hpp>
#include <llarp/dht/messages/findintro.hpp>
#include <llarp/dht/messages/findname.hpp>
#include <llarp/dht/messages/findrouter.hpp>
#include <llarp/dht/messages/gotintro.hpp>
#include <llarp/dht/messages/gotname.hpp>
#include <llarp/dht/messages/gotrouter.hpp>
#include <llarp/dht/messages/pubintro.hpp>
#include <llarp/nodedb.hpp>
#include <llarp/profiling.hpp>
#include <llarp/router/abstractrouter.hpp>
#include <llarp/routing/dht_message.hpp>
#include <llarp/routing/path_transfer_message.hpp>

#include "endpoint_state.hpp"
#include "endpoint_util.hpp"
#include "hidden_service_address_lookup.hpp"
#include "net/ip.hpp"
#include "outbound_context.hpp"
#include "protocol.hpp"
#include "service/info.hpp"
#include "service/protocol_type.hpp"

#include <llarp/util/str.hpp>
#include <llarp/util/buffer.hpp>
#include <llarp/util/meta/memfn.hpp>
#include <llarp/link/link_manager.hpp>
#include <llarp/tooling/dht_event.hpp>
#include <llarp/quic/server.hpp>
#include <llarp/quic/tunnel.hpp>
#include <llarp/util/priority_queue.hpp>

#include <uvw.hpp>

#include <optional>
#include <utility>
#include <variant>

namespace llarp
{
  namespace service
  {
    static auto logcat = log::Cat("endpoint");
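
    // Set up this endpoint on router r: size the inbound/outbound/recv queues,
    // enable the receive queue, and create the QUIC tunnel manager when a libuv
    // event loop is available.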
    Endpoint::Endpoint(AbstractRouter* r, Context* parent)
        : path::Builder{r, 3, path::default_len}
        , context{parent}
        , m_InboundTrafficQueue{512}
        , m_SendQueue{512}
        , m_RecvQueue{512}
        , m_IntrosetLookupFilter{5s}
    {
      m_state = std::make_unique<EndpointState>();
      m_state->m_Router = r;
      m_state->m_Name = "endpoint";
      m_RecvQueue.enable();

      if (Loop()->MaybeGetUVWLoop())
        m_quic = std::make_unique<quic::TunnelManager>(*this);
    }
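
    // Apply the network section of the config: path count and hop length, exit
    // mappings and their auth credentials, plus LNS names that still need to be
    // resolved before they can be mapped as exits.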
    bool
    Endpoint::Configure(const NetworkConfig& conf, [[maybe_unused]] const DnsConfig& dnsConf)
    {
      if (conf.m_Paths.has_value())
        numDesiredPaths = *conf.m_Paths;

      if (conf.m_Hops.has_value())
        numHops = *conf.m_Hops;

      conf.m_ExitMap.ForEachEntry(
          [&](const IPRange& range, const service::Address& addr) { MapExitRange(range, addr); });

      for (auto [exit, auth] : conf.m_ExitAuths)
      {
        SetAuthInfoForEndpoint(exit, auth);
      }

      conf.m_LNSExitMap.ForEachEntry([&](const IPRange& range, const std::string& name) {
        std::optional<AuthInfo> auth;
        const auto itr = conf.m_LNSExitAuths.find(name);
        if (itr != conf.m_LNSExitAuths.end())
          auth = itr->second;
        m_StartupLNSMappings[name] = std::make_pair(range, auth);
      });

      return m_state->Configure(conf);
    }

    bool
    Endpoint::HasPendingPathToService(const Address& addr) const
    {
      return m_state->m_PendingServiceLookups.find(addr) != m_state->m_PendingServiceLookups.end();
    }
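
    // Rebuild our introset from the currently usable path introductions,
    // advertise the supported protocol types (IPv4/IPv6 traffic, exit, QUIC),
    // then sign and publish it to the DHT.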
    void
    Endpoint::RegenAndPublishIntroSet()
    {
      const auto now = llarp::time_now_ms();
      m_LastIntrosetRegenAttempt = now;
      std::set<Introduction, CompareIntroTimestamp> intros;
      if (const auto maybe =
              GetCurrentIntroductionsWithFilter([now](const service::Introduction& intro) -> bool {
                return not intro.ExpiresSoon(now, path::intro_stale_threshold);
              }))
      {
        intros.insert(maybe->begin(), maybe->end());
      }
      else
      {
        LogWarn(
            "could not publish descriptors for endpoint ",
            Name(),
            " because we couldn't get enough valid introductions");
        BuildOne();
        return;
      }

      introSet().supportedProtocols.clear();

      // add supported ethertypes
      if (HasIfAddr())
      {
        if (IPRange::V4MappedRange().Contains(GetIfAddr()))
        {
          introSet().supportedProtocols.push_back(ProtocolType::TrafficV4);
        }
        else
        {
          introSet().supportedProtocols.push_back(ProtocolType::TrafficV6);
        }

        // exit related stuff
        if (m_state->m_ExitEnabled)
        {
          introSet().supportedProtocols.push_back(ProtocolType::Exit);
          introSet().exitTrafficPolicy = GetExitPolicy();
          introSet().ownedRanges = GetOwnedRanges();
        }
      }

      // add quic ethertype if we have listeners set up
      if (auto* quic = GetQUICTunnel())
      {
        if (quic->hasListeners())
          introSet().supportedProtocols.push_back(ProtocolType::QUIC);
      }

      introSet().intros.clear();
      for (auto& intro : intros)
      {
        if (introSet().intros.size() < numDesiredPaths)
          introSet().intros.emplace_back(std::move(intro));
      }
      if (introSet().intros.empty())
      {
        LogWarn("not enough intros to publish introset for ", Name());
        if (ShouldBuildMore(now))
          ManualRebuild(1);
        return;
      }
      auto maybe = m_Identity.EncryptAndSignIntroSet(introSet(), now);
      if (not maybe)
      {
        LogWarn("failed to generate introset for endpoint ", Name());
        return;
      }
      if (PublishIntroSet(*maybe, Router()))
      {
        LogInfo("(re)publishing introset for endpoint ", Name());
      }
      else
      {
        LogWarn("failed to publish intro set for endpoint ", Name());
      }
    }

    bool
    Endpoint::IsReady() const
    {
      const auto now = Now();
      if (introSet().intros.empty())
        return false;
      if (introSet().IsExpired(now))
        return false;
      return true;
    }

    bool
    Endpoint::HasPendingRouterLookup(const RouterID remote) const
    {
      const auto& routers = m_state->m_PendingRouters;
      return routers.find(remote) != routers.end();
    }
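
    // Map a convo tag back to the remote it belongs to: either the .loki
    // address of an established session, or the router id of an snode session
    // whose current path matches the tag.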
    std::optional<std::variant<Address, RouterID>>
    Endpoint::GetEndpointWithConvoTag(ConvoTag tag) const
    {
      auto itr = Sessions().find(tag);
      if (itr != Sessions().end())
      {
        return itr->second.remote.Addr();
      }

      for (const auto& item : m_state->m_SNodeSessions)
      {
        if (const auto maybe = item.second->CurrentPath())
        {
          if (ConvoTag{maybe->as_array()} == tag)
            return item.first;
        }
      }
      return std::nullopt;
    }
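
    // Resolve SRV records for a service at a remote name. The callbacks below
    // chain together: name lookup -> path build -> SRV record extraction,
    // invoking resultHandler with an empty list on any failure along the way.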
    void
    Endpoint::LookupServiceAsync(
        std::string name,
        std::string service,
        std::function<void(std::vector<dns::SRVData>)> resultHandler)
    {
      // handles when we aligned to a loki address
      auto handleGotPathToService = [resultHandler, service, this](auto addr) {
        // we could get this info before we have a path to them, but waiting
        // until we have a path means the caller can start sending traffic
        // immediately once it gets the response
        const auto& container = m_state->m_RemoteSessions;
        if (auto itr = container.find(addr); itr != container.end())
        {
          // pull the SRV records matching the requested service from the
          // session's current introset
          resultHandler(itr->second->GetCurrentIntroSet().GetMatchingSRVRecords(service));
          return;
        }
        resultHandler({});
      };

      // handles when we resolved a .snode
      auto handleResolvedSNodeName = [resultHandler, nodedb = Router()->nodedb()](auto router_id) {
        std::vector<dns::SRVData> result{};
        if (auto maybe_rc = nodedb->Get(router_id))
        {
          result = maybe_rc->srvRecords;
        }
        resultHandler(std::move(result));
      };

      // handles when we got a path to a remote thing
      auto handleGotPathTo = [handleGotPathToService, handleResolvedSNodeName, resultHandler](
                                 auto maybe_tag, auto address) {
        if (not maybe_tag)
        {
          resultHandler({});
          return;
        }

        if (auto* addr = std::get_if<Address>(&address))
        {
          // .loki case
          handleGotPathToService(*addr);
        }
        else if (auto* router_id = std::get_if<RouterID>(&address))
        {
          // .snode case
          handleResolvedSNodeName(*router_id);
        }
        else
        {
          // fallback case
          // XXX: should never happen but we'll handle it anyway
          resultHandler({});
        }
      };

      // handles when we know a long address of a remote resource
      auto handleGotAddress = [resultHandler, handleGotPathTo, this](auto address) {
        // we will attempt a build to whatever we looked up
        const auto result = EnsurePathTo(
            address,
            [address, handleGotPathTo](auto maybe_tag) { handleGotPathTo(maybe_tag, address); },
            PathAlignmentTimeout());

        // on path build start fail short circuit
        if (not result)
          resultHandler({});
      };

      // look up this name async and start the entire chain of events
      LookupNameAsync(name, [handleGotAddress, resultHandler](auto maybe_addr) {
        if (maybe_addr)
        {
          handleGotAddress(*maybe_addr);
        }
        else
        {
          resultHandler({});
        }
      });
    }

    bool
    Endpoint::IntrosetIsStale() const
    {
      return introSet().HasExpiredIntros(Now());
    }

    util::StatusObject
    Endpoint::ExtractStatus() const
    {
      auto obj = path::Builder::ExtractStatus();
      obj["exitMap"] = m_ExitMap.ExtractStatus();
      obj["identity"] = m_Identity.pub.Addr().ToString();
      obj["networkReady"] = ReadyForNetwork();

      util::StatusObject authCodes;
      for (const auto& [service, info] : m_RemoteAuthInfos)
      {
        authCodes[service.ToString()] = info.token;
      }
      obj["authCodes"] = authCodes;

      return m_state->ExtractStatus(obj);
    }
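
    // Periodic housekeeping: republish descriptors when due, decay caches and
    // filters, expire dead sessions and pending lookups, and resolve any LNS
    // exit mappings left over from startup once we have enough paths.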
    void
    Endpoint::Tick(llarp_time_t)
    {
      const auto now = llarp::time_now_ms();
      path::Builder::Tick(now);
      // publish descriptors
      if (ShouldPublishDescriptors(now))
      {
        RegenAndPublishIntroSet();
      }
      // decay introset lookup filter
      m_IntrosetLookupFilter.Decay(now);
      // expire name cache
      m_state->nameCache.Decay(now);
      // expire snode sessions
      EndpointUtil::ExpireSNodeSessions(now, m_state->m_SNodeSessions);
      // expire pending tx
      EndpointUtil::ExpirePendingTx(now, m_state->m_PendingLookups);
      // expire pending router lookups
      EndpointUtil::ExpirePendingRouterLookups(now, m_state->m_PendingRouters);

      // deregister dead sessions
      EndpointUtil::DeregisterDeadSessions(now, m_state->m_DeadSessions);
      // tick remote sessions
      EndpointUtil::TickRemoteSessions(
          now, m_state->m_RemoteSessions, m_state->m_DeadSessions, Sessions());
      // expire convotags
      EndpointUtil::ExpireConvoSessions(now, Sessions());

      if (NumInStatus(path::ePathEstablished) > 1)
      {
        for (const auto& item : m_StartupLNSMappings)
        {
          LookupNameAsync(
              item.first, [name = item.first, info = item.second, this](auto maybe_addr) {
                if (maybe_addr.has_value())
                {
                  const auto maybe_range = info.first;
                  const auto maybe_auth = info.second;

                  m_StartupLNSMappings.erase(name);
                  if (auto* addr = std::get_if<service::Address>(&*maybe_addr))
                  {
                    if (maybe_range.has_value())
                      m_ExitMap.Insert(*maybe_range, *addr);
                    if (maybe_auth.has_value())
                      SetAuthInfoForEndpoint(*addr, *maybe_auth);
                  }
                }
              });
        }
      }
    }

    bool
    Endpoint::Stop()
    {
      // stop remote sessions
      log::debug(logcat, "Endpoint stopping remote sessions.");
      EndpointUtil::StopRemoteSessions(m_state->m_RemoteSessions);
      // stop snode sessions
      log::debug(logcat, "Endpoint stopping snode sessions.");
      EndpointUtil::StopSnodeSessions(m_state->m_SNodeSessions);
      log::debug(logcat, "Endpoint stopping its path builder.");
      return path::Builder::Stop();
    }
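
    // Generate a transaction id that does not collide with any pending lookup.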
    uint64_t
    Endpoint::GenTXID()
    {
      uint64_t txid = randint();
      const auto& lookups = m_state->m_PendingLookups;
      while (lookups.find(txid) != lookups.end())
        ++txid;
      return txid;
    }

    std::string
    Endpoint::Name() const
    {
      return m_state->m_Name + ":" + m_Identity.pub.Name();
    }

    void
    Endpoint::PutLookup(IServiceLookup* lookup, uint64_t txid)
    {
      m_state->m_PendingLookups.emplace(txid, std::unique_ptr<IServiceLookup>(lookup));
    }
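
    // Handle a DHT introset lookup response: verify every returned introset,
    // then hand the batch to the pending lookup matching the response txid.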
    bool
    Endpoint::HandleGotIntroMessage(dht::GotIntroMessage_constptr msg)
    {
      std::set<EncryptedIntroSet> remote;
      for (const auto& introset : msg->found)
      {
        if (not introset.Verify(Now()))
        {
          LogError(Name(), " got invalid introset");
          return false;
        }
        remote.insert(introset);
      }
      auto& lookups = m_state->m_PendingLookups;
      auto itr = lookups.find(msg->txid);
      if (itr == lookups.end())
      {
        LogWarn(
            "invalid lookup response for hidden service endpoint ", Name(), " txid=", msg->txid);
        return true;
      }
      std::unique_ptr<IServiceLookup> lookup = std::move(itr->second);
      lookups.erase(itr);
      lookup->HandleIntrosetResponse(remote);
      return true;
    }

    bool
    Endpoint::HasInboundConvo(const Address& addr) const
    {
      for (const auto& item : Sessions())
      {
        if (item.second.remote.Addr() == addr and item.second.inbound)
          return true;
      }
      return false;
    }

    bool
    Endpoint::HasOutboundConvo(const Address& addr) const
    {
      for (const auto& item : Sessions())
      {
        if (item.second.remote.Addr() == addr && not item.second.inbound)
          return true;
      }
      return false;
    }
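
    // Record the sender info for a convo tag, creating the session entry if
    // needed; refuses to add an inbound session for a remote we want an
    // outbound session to.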
    void
    Endpoint::PutSenderFor(const ConvoTag& tag, const ServiceInfo& info, bool inbound)
    {
      if (info.Addr().IsZero())
      {
        LogError(Name(), " cannot put invalid service info ", info, " T=", tag);
        return;
      }
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        if (WantsOutboundSession(info.Addr()) and inbound)
        {
          LogWarn(
              Name(),
              " not adding sender for ",
              info.Addr(),
              " session is inbound and we want outbound T=",
              tag);
          return;
        }
        itr = Sessions().emplace(tag, Session{}).first;
        itr->second.inbound = inbound;
        itr->second.remote = info;
      }
    }

    size_t
    Endpoint::RemoveAllConvoTagsFor(service::Address remote)
    {
      size_t removed = 0;
      auto& sessions = Sessions();
      auto itr = sessions.begin();
      while (itr != sessions.end())
      {
        if (itr->second.remote.Addr() == remote)
        {
          itr = sessions.erase(itr);
          removed++;
        }
        else
          ++itr;
      }
      return removed;
    }

    bool
    Endpoint::GetSenderFor(const ConvoTag& tag, ServiceInfo& si) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      si = itr->second.remote;
      si.UpdateAddr();
      return true;
    }

    void
    Endpoint::PutIntroFor(const ConvoTag& tag, const Introduction& intro)
    {
      auto& s = Sessions()[tag];
      s.intro = intro;
    }

    bool
    Endpoint::GetIntroFor(const ConvoTag& tag, Introduction& intro) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      intro = itr->second.intro;
      return true;
    }

    void
    Endpoint::PutReplyIntroFor(const ConvoTag& tag, const Introduction& intro)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        return;
      }
      itr->second.replyIntro = intro;
    }

    bool
    Endpoint::GetReplyIntroFor(const ConvoTag& tag, Introduction& intro) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      intro = itr->second.replyIntro;
      return true;
    }

    bool
    Endpoint::GetConvoTagsForService(const Address& addr, std::set<ConvoTag>& tags) const
    {
      return EndpointUtil::GetConvoTagsForService(Sessions(), addr, tags);
    }

    bool
    Endpoint::GetCachedSessionKeyFor(const ConvoTag& tag, SharedSecret& secret) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      secret = itr->second.sharedKey;
      return true;
    }

    void
    Endpoint::PutCachedSessionKeyFor(const ConvoTag& tag, const SharedSecret& k)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        itr = Sessions().emplace(tag, Session{}).first;
      }
      itr->second.sharedKey = k;
    }

    void
    Endpoint::ConvoTagTX(const ConvoTag& tag)
    {
      if (Sessions().count(tag))
        Sessions()[tag].TX();
    }

    void
    Endpoint::ConvoTagRX(const ConvoTag& tag)
    {
      if (Sessions().count(tag))
        Sessions()[tag].RX();
    }

    bool
    Endpoint::LoadKeyFile()
    {
      const auto& keyfile = m_state->m_Keyfile;
      if (!keyfile.empty())
      {
        m_Identity.EnsureKeys(keyfile, Router()->keyManager()->needBackup());
      }
      else
      {
        m_Identity.RegenerateKeys();
      }
      return true;
    }

    bool
    Endpoint::Start()
    {
      // how can I tell if m_Identity isn't loaded?
      if (!m_DataHandler)
      {
        m_DataHandler = this;
      }
      // this does network isolation
      while (m_state->m_OnInit.size())
      {
        if (m_state->m_OnInit.front()())
          m_state->m_OnInit.pop_front();
        else
        {
          LogWarn("Can't call init of network isolation");
          return false;
        }
      }
      return true;
    }

    // Keep this here (rather than the header) so that we don't need to include endpoint_state.hpp
    // in endpoint.hpp for the unique_ptr member destructor.
    Endpoint::~Endpoint() = default;
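
    // Publish an encrypted introset to the DHT via IntroSetRelayRedundancy
    // distinct path endpoints, sending IntroSetRequestsPerRelay requests on
    // each, so that the introset is stored at IntroSetStorageRedundancy
    // locations in total.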
    bool
    Endpoint::PublishIntroSet(const EncryptedIntroSet& introset, AbstractRouter* r)
    {
      const auto paths = GetManyPathsWithUniqueEndpoints(
          this,
          llarp::dht::IntroSetRelayRedundancy,
          dht::Key_t{introset.derivedSigningKey.as_array()});

      if (paths.size() != llarp::dht::IntroSetRelayRedundancy)
      {
        LogWarn(
            "Cannot publish intro set because we only have ",
            paths.size(),
            " paths, but need ",
            llarp::dht::IntroSetRelayRedundancy);
        return false;
      }

      // do publishing for each path selected
      size_t published = 0;

      for (const auto& path : paths)
      {
        for (size_t i = 0; i < llarp::dht::IntroSetRequestsPerRelay; ++i)
        {
          r->NotifyRouterEvent<tooling::PubIntroSentEvent>(
              r->pubkey(),
              llarp::dht::Key_t{introset.derivedSigningKey.as_array()},
              RouterID(path->hops[path->hops.size() - 1].rc.pubkey),
              published);
          if (PublishIntroSetVia(introset, r, path, published))
            published++;
        }
      }
      if (published != llarp::dht::IntroSetStorageRedundancy)
        LogWarn(
            "Publish introset failed: could only publish ",
            published,
            " copies but wanted ",
            llarp::dht::IntroSetStorageRedundancy);
      return published == llarp::dht::IntroSetStorageRedundancy;
    }
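
    // Lookup job wrapping a single introset publish request: builds the
    // PublishIntroMessage and reports success or failure back to the endpoint.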
    struct PublishIntroSetJob : public IServiceLookup
    {
      EncryptedIntroSet m_IntroSet;
      Endpoint* m_Endpoint;
      uint64_t m_relayOrder;

      PublishIntroSetJob(
          Endpoint* parent,
          uint64_t id,
          EncryptedIntroSet introset,
          uint64_t relayOrder,
          llarp_time_t timeout)
          : IServiceLookup(parent, id, "PublishIntroSet", timeout)
          , m_IntroSet(std::move(introset))
          , m_Endpoint(parent)
          , m_relayOrder(relayOrder)
      {}

      std::shared_ptr<routing::IMessage>
      BuildRequestMessage() override
      {
        auto msg = std::make_shared<routing::DHTMessage>();
        msg->M.emplace_back(
            std::make_unique<dht::PublishIntroMessage>(m_IntroSet, txid, true, m_relayOrder));
        return msg;
      }

      bool
      HandleIntrosetResponse(const std::set<EncryptedIntroSet>& response) override
      {
        if (not response.empty())
          m_Endpoint->IntroSetPublished();
        else
          m_Endpoint->IntroSetPublishFail();

        return true;
      }
    };

    void
    Endpoint::IntroSetPublishFail()
    {
      auto now = Now();
      if (ShouldPublishDescriptors(now))
      {
        RegenAndPublishIntroSet();
      }
      else if (NumInStatus(path::ePathEstablished) < 3)
      {
        if (introSet().HasExpiredIntros(now))
          ManualRebuild(1);
      }
    }

    size_t
    Endpoint::UniqueEndpoints() const
    {
      return m_state->m_RemoteSessions.size() + m_state->m_SNodeSessions.size();
    }

    constexpr auto PublishIntrosetTimeout = 20s;

    bool
    Endpoint::PublishIntroSetVia(
        const EncryptedIntroSet& introset,
        AbstractRouter* r,
        path::Path_ptr path,
        uint64_t relayOrder)
    {
      auto job =
          new PublishIntroSetJob(this, GenTXID(), introset, relayOrder, PublishIntrosetTimeout);
      if (job->SendRequestViaPath(path, r))
      {
        m_state->m_LastPublishAttempt = Now();
        return true;
      }
      return false;
    }

    void
    Endpoint::ResetInternalState()
    {
      path::Builder::ResetInternalState();
      static auto resetState = [](auto& container, auto getter) {
        std::for_each(container.begin(), container.end(), [getter](auto& item) {
          getter(item)->ResetInternalState();
        });
      };
      resetState(m_state->m_RemoteSessions, [](const auto& item) { return item.second; });
      resetState(m_state->m_SNodeSessions, [](const auto& item) { return item.second; });
    }
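
    // Decide whether it is time to (re)publish our introset: use the shorter
    // retry cooldown while the current introset has stale intros, otherwise
    // the regular publish interval.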
    bool
    Endpoint::ShouldPublishDescriptors(llarp_time_t now) const
    {
      if (not m_PublishIntroSet)
        return false;

      const auto lastEventAt = std::max(m_state->m_LastPublishAttempt, m_state->m_LastPublish);
      const auto next_pub = lastEventAt
          + (m_state->m_IntroSet.HasStaleIntros(now, path::intro_stale_threshold)
                 ? IntrosetPublishRetryCooldown
                 : IntrosetPublishInterval);

      return now >= next_pub;
    }

    void
    Endpoint::IntroSetPublished()
    {
      const auto now = Now();
      // We usually get 4 confirmations back (one for each DHT location), which
      // is noisy: suppress this log message if we already had a confirmation in
      // the last second.
      if (m_state->m_LastPublish < now - 1s)
        LogInfo(Name(), " IntroSet publish confirmed");
      else
        LogDebug(Name(), " Additional IntroSet publish confirmed");

      m_state->m_LastPublish = now;
    }
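
    // Choose hops for a new path, preferring a random endpoint we do not
    // already have a path to and that the profiler does not consider bad.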
    std::optional<std::vector<RouterContact>>
    Endpoint::GetHopsForBuild()
    {
      std::unordered_set<RouterID> exclude;
      ForEachPath([&exclude](auto path) { exclude.insert(path->Endpoint()); });
      const auto maybe =
          m_router->nodedb()->GetRandom([exclude, r = m_router](const auto& rc) -> bool {
            return exclude.count(rc.pubkey) == 0
                and not r->routerProfiling().IsBadForPath(rc.pubkey);
          });
      if (not maybe.has_value())
        return std::nullopt;
      return GetHopsForBuildWithEndpoint(maybe->pubkey);
    }

    std::optional<std::vector<RouterContact>>
    Endpoint::GetHopsForBuildWithEndpoint(RouterID endpoint)
    {
      return path::Builder::GetHopsAlignedToForBuild(endpoint, SnodeBlacklist());
    }

    void
    Endpoint::PathBuildStarted(path::Path_ptr path)
    {
      path::Builder::PathBuildStarted(path);
    }

    constexpr auto MaxOutboundContextPerRemote = 1;

    void
    Endpoint::PutNewOutboundContext(const service::IntroSet& introset, llarp_time_t left)
    {
      const Address addr{introset.addressKeys.Addr()};

      auto& remoteSessions = m_state->m_RemoteSessions;

      if (remoteSessions.count(addr) < MaxOutboundContextPerRemote)
      {
        remoteSessions.emplace(addr, std::make_shared<OutboundContext>(introset, this));
        LogInfo("Created New outbound context for ", addr.ToString());
      }

      auto sessionRange = remoteSessions.equal_range(addr);
      for (auto itr = sessionRange.first; itr != sessionRange.second; ++itr)
      {
        itr->second->AddReadyHook(
            [addr, this](auto session) { InformPathToService(addr, session); }, left);
      }
    }

    void
    Endpoint::HandleVerifyGotRouter(dht::GotRouterMessage_constptr msg, RouterID id, bool valid)
    {
      auto& pendingRouters = m_state->m_PendingRouters;
      auto itr = pendingRouters.find(id);
      if (itr != pendingRouters.end())
      {
        if (valid)
          itr->second.InformResult(msg->foundRCs);
        else
          itr->second.InformResult({});
        pendingRouters.erase(itr);
      }
    }
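
    // Handle a DHT router lookup response. RC signature verification is
    // offloaded to the worker pool; results are marshalled back onto the event
    // loop, stored in the nodedb, and handed to the pending lookup. An empty
    // response fails every pending lookup with a matching txid.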
    bool
    Endpoint::HandleGotRouterMessage(dht::GotRouterMessage_constptr msg)
    {
      if (not msg->foundRCs.empty())
      {
        for (auto& rc : msg->foundRCs)
        {
          Router()->QueueWork([this, rc, msg]() mutable {
            bool valid = rc.Verify(llarp::time_now_ms());
            Router()->loop()->call([this, valid, rc = std::move(rc), msg] {
              Router()->nodedb()->PutIfNewer(rc);
              HandleVerifyGotRouter(msg, rc.pubkey, valid);
            });
          });
        }
      }
      else
      {
        auto& routers = m_state->m_PendingRouters;
        auto itr = routers.begin();
        while (itr != routers.end())
        {
          if (itr->second.txid == msg->txid)
          {
            itr->second.InformResult({});
            itr = routers.erase(itr);
          }
          else
            ++itr;
        }
      }
      return true;
    }
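
    // Lookup job for one LNS query: the request carries only the shorthash of
    // the name, so the relays answering the query never see the plaintext name
    // being resolved.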
    struct LookupNameJob : public IServiceLookup
    {
      std::function<void(std::optional<Address>)> handler;
      ShortHash namehash;

      LookupNameJob(
          Endpoint* parent,
          uint64_t id,
          std::string lnsName,
          std::function<void(std::optional<Address>)> resultHandler)
          : IServiceLookup(parent, id, lnsName), handler(resultHandler)
      {
        CryptoManager::instance()->shorthash(
            namehash, llarp_buffer_t(lnsName.c_str(), lnsName.size()));
      }

      std::shared_ptr<routing::IMessage>
      BuildRequestMessage() override
      {
        auto msg = std::make_shared<routing::DHTMessage>();
        msg->M.emplace_back(std::make_unique<dht::FindNameMessage>(
            dht::Key_t{}, dht::Key_t{namehash.as_array()}, txid));
        return msg;
      }

      bool
      HandleNameResponse(std::optional<Address> addr) override
      {
        handler(addr);
        return true;
      }

      void
      HandleTimeout() override
      {
        HandleNameResponse(std::nullopt);
      }
    };

    bool
    Endpoint::HasExit() const
    {
      for (const auto& [name, info] : m_StartupLNSMappings)
      {
        if (info.first.has_value())
          return true;
      }

      return not m_ExitMap.Empty();
    }

    path::Path::UniqueEndpointSet_t
    Endpoint::GetUniqueEndpointsForLookup() const
    {
      path::Path::UniqueEndpointSet_t paths;
      ForEachPath([&paths](auto path) {
        if (path and path->IsReady())
          paths.insert(path);
      });
      return paths;
    }

    bool
    Endpoint::ReadyForNetwork() const
    {
      return IsReady() and ReadyToDoLookup(GetUniqueEndpointsForLookup().size());
    }

    bool
    Endpoint::ReadyToDoLookup(size_t num_paths) const
    {
      // Currently just checks the number of paths, but could do more checks in the future.
      return num_paths >= MIN_ENDPOINTS_FOR_LNS_LOOKUP;
    }
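
    // Resolve an LNS name to a .loki/.snode address. Plain addresses and
    // cached names are answered immediately; otherwise the query is sent over
    // several randomly chosen paths and the per-path answers are reconciled by
    // the lns tracker before the cache and the handler are updated.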
    void
    Endpoint::LookupNameAsync(
        std::string name,
        std::function<void(std::optional<std::variant<Address, RouterID>>)> handler)
    {
      if (not NameIsValid(name))
      {
        handler(ParseAddress(name));
        return;
      }
      auto& cache = m_state->nameCache;
      const auto maybe = cache.Get(name);
      if (maybe.has_value())
      {
        handler(maybe);
        return;
      }
      LogInfo(Name(), " looking up LNS name: ", name);
      auto paths = GetUniqueEndpointsForLookup();
      // not enough paths
      if (not ReadyToDoLookup(paths.size()))
      {
        LogWarn(
            Name(),
            " not enough paths for lns lookup, have ",
            paths.size(),
            " need ",
            MIN_ENDPOINTS_FOR_LNS_LOOKUP);
        handler(std::nullopt);
        return;
      }

      auto maybeInvalidateCache = [handler, &cache, name](auto result) {
        if (result)
        {
          var::visit(
              [&result, &cache, name](auto&& value) {
                if (value.IsZero())
                {
                  cache.Remove(name);
                  result = std::nullopt;
                }
              },
              *result);
        }
        if (result)
        {
          cache.Put(name, *result);
        }
        handler(result);
      };

      constexpr size_t max_lns_lookup_endpoints = 7;
      // pick up to max_lns_lookup_endpoints random paths to do lookups from
      std::vector<path::Path_ptr> chosenpaths;
      chosenpaths.insert(chosenpaths.begin(), paths.begin(), paths.end());
      std::shuffle(chosenpaths.begin(), chosenpaths.end(), CSRNG{});
      chosenpaths.resize(std::min(paths.size(), max_lns_lookup_endpoints));

      auto resultHandler =
          m_state->lnsTracker.MakeResultHandler(name, chosenpaths.size(), maybeInvalidateCache);

      for (const auto& path : chosenpaths)
      {
        LogInfo(Name(), " lookup ", name, " from ", path->Endpoint());
        auto job = new LookupNameJob{this, GenTXID(), name, resultHandler};
        job->SendRequestViaPath(path, m_router);
      }
    }

    bool
    Endpoint::HandleGotNameMessage(std::shared_ptr<const dht::GotNameMessage> msg)
    {
      auto& lookups = m_state->m_PendingLookups;
      auto itr = lookups.find(msg->TxID);
      if (itr == lookups.end())
        return false;

      // decrypt entry
      const auto maybe = msg->result.Decrypt(itr->second->name);
      // inform result
      itr->second->HandleNameResponse(maybe);
      lookups.erase(itr);
      return true;
    }

    void
    Endpoint::EnsureRouterIsKnown(const RouterID& router)
    {
      if (router.IsZero())
        return;
      if (!Router()->nodedb()->Has(router))
      {
        LookupRouterAnon(router, nullptr);
      }
    }
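
    // Look up a router's RC anonymously by sending a FindRouterMessage over
    // one of our own paths; if the lookup comes back empty, the router is
    // evicted from the nodedb.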

  bool
  Endpoint::LookupRouterAnon(RouterID router, RouterLookupHandler handler)
  {
    using llarp::dht::FindRouterMessage;

    auto& routers = m_state->m_PendingRouters;
    if (routers.find(router) == routers.end())
    {
      auto path = GetEstablishedPathClosestTo(router);
      routing::DHTMessage msg;
      auto txid = GenTXID();
      msg.M.emplace_back(std::make_unique<FindRouterMessage>(txid, router));
      if (path)
        msg.S = path->NextSeqNo();
      if (path && path->SendRoutingMessage(msg, Router()))
      {
        RouterLookupJob job{this, [handler, router, nodedb = m_router->nodedb()](auto results) {
                              if (results.empty())
                              {
                                LogInfo("could not find ", router, ", removing it from nodedb");
                                nodedb->Remove(router);
                              }
                              if (handler)
                                handler(results);
                            }};

        assert(msg.M.size() == 1);
        auto dhtMsg = dynamic_cast<FindRouterMessage*>(msg.M[0].get());
        assert(dhtMsg != nullptr);

        m_router->NotifyRouterEvent<tooling::FindRouterSentEvent>(m_router->pubkey(), *dhtMsg);

        routers.emplace(router, std::move(job));
        return true;
      }
    }
    return false;
  }
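
  // Usage sketch (hypothetical caller; RouterLookupHandler's exact signature
  // is an assumption inferred from the lambda above, which only shows a
  // single results argument):
  //
  //   RouterID rid = ...;
  //   if (not ep->LookupRouterAnon(rid, [](auto results) {
  //         if (results.empty())
  //           LogWarn("router lookup came back empty");
  //       }))
  //     LogWarn("lookup not dispatched: already pending or no usable path");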

  void
  Endpoint::HandlePathBuilt(path::Path_ptr p)
  {
    p->SetDataHandler(util::memFn(&Endpoint::HandleHiddenServiceFrame, this));
    p->SetDropHandler(util::memFn(&Endpoint::HandleDataDrop, this));
    p->SetDeadChecker(util::memFn(&Endpoint::CheckPathIsDead, this));
    path::Builder::HandlePathBuilt(p);
  }

  bool
  Endpoint::HandleDataDrop(path::Path_ptr p, const PathID_t& dst, uint64_t seq)
  {
    LogWarn(Name(), " message ", seq, " dropped by endpoint ", p->Endpoint(), " via ", dst);
    return true;
  }

  std::unordered_map<std::string, std::string>
  Endpoint::NotifyParams() const
  {
    return {{"LOKINET_ADDR", m_Identity.pub.Addr().ToString()}};
  }

  void
  Endpoint::FlushRecvData()
  {
    while (auto maybe = m_RecvQueue.tryPopFront())
    {
      auto& ev = *maybe;
      ProtocolMessage::ProcessAsync(ev.fromPath, ev.pathid, ev.msg);
    }
  }

  void
  Endpoint::QueueRecvData(RecvDataEvent ev)
  {
    m_RecvQueue.tryPushBack(std::move(ev));
    Router()->TriggerPump();
  }
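
  // Commentary (an assumption, not stated in the original): m_RecvQueue
  // appears to be a bounded producer/consumer queue. QueueRecvData only
  // enqueues and wakes the router's pump, while FlushRecvData drains the
  // queue from Pump() on the logic thread, so ProtocolMessage::ProcessAsync
  // never runs on the producer's thread.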

  bool
  Endpoint::HandleDataMessage(
      path::Path_ptr p, const PathID_t from, std::shared_ptr<ProtocolMessage> msg)
  {
    PutSenderFor(msg->tag, msg->sender, true);
    Introduction intro = msg->introReply;
    if (HasInboundConvo(msg->sender.Addr()))
    {
      intro.pathID = from;
      intro.router = p->Endpoint();
    }
    PutReplyIntroFor(msg->tag, intro);
    ConvoTagRX(msg->tag);
    return ProcessDataMessage(msg);
  }

  bool
  Endpoint::HasPathToSNode(const RouterID ident) const
  {
    auto range = m_state->m_SNodeSessions.equal_range(ident);
    auto itr = range.first;
    while (itr != range.second)
    {
      if (itr->second->IsReady())
      {
        return true;
      }
      ++itr;
    }
    return false;
  }

  EndpointBase::AddressVariant_t
  Endpoint::LocalAddress() const
  {
    return m_Identity.pub.Addr();
  }

  std::optional<EndpointBase::SendStat>
  Endpoint::GetStatFor(AddressVariant_t) const
  {
    // TODO: implement me
    return std::nullopt;
  }

  std::unordered_set<EndpointBase::AddressVariant_t>
  Endpoint::AllRemoteEndpoints() const
  {
    std::unordered_set<AddressVariant_t> remote;
    for (const auto& item : Sessions())
    {
      remote.insert(item.second.remote.Addr());
    }
    for (const auto& item : m_state->m_SNodeSessions)
    {
      remote.insert(item.first);
    }
    return remote;
  }

  bool
  Endpoint::ProcessDataMessage(std::shared_ptr<ProtocolMessage> msg)
  {
    if ((msg->proto == ProtocolType::Exit
         && (m_state->m_ExitEnabled || m_ExitMap.ContainsValue(msg->sender.Addr())))
        || msg->proto == ProtocolType::TrafficV4 || msg->proto == ProtocolType::TrafficV6
        || (msg->proto == ProtocolType::QUIC and m_quic))
    {
      m_InboundTrafficQueue.tryPushBack(std::move(msg));
      Router()->TriggerPump();
      return true;
    }
    if (msg->proto == ProtocolType::Control)
    {
      // TODO: implement me (?)
      // right now it's just random noise
      return true;
    }
    return false;
  }
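
  // Summary (added commentary): inbound payloads are only queued when they
  // are exit traffic we permit, plain IPv4/IPv6 traffic, or QUIC while a
  // tunnel manager exists; Control frames are accepted but ignored (they are
  // padding/noise), and everything else is rejected.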

  void
  Endpoint::AsyncProcessAuthMessage(
      std::shared_ptr<ProtocolMessage> msg, std::function<void(AuthResult)> hook)
  {
    if (m_AuthPolicy)
    {
      if (not m_AuthPolicy->AsyncAuthPending(msg->tag))
      {
        // do 1 authentication attempt and drop everything else
        m_AuthPolicy->AuthenticateAsync(std::move(msg), std::move(hook));
      }
    }
    else
    {
      Router()->loop()->call([h = std::move(hook)] { h({AuthResultCode::eAuthAccepted, "OK"}); });
    }
  }
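
  // Commentary (assumption based on the code above): at most one
  // authentication attempt is in flight per convo tag; frames arriving while
  // AsyncAuthPending() is true are dropped, and endpoints without an auth
  // policy accept everyone by scheduling the hook with eAuthAccepted on the
  // event loop.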

  void
  Endpoint::SendAuthResult(
      path::Path_ptr path, PathID_t replyPath, ConvoTag tag, AuthResult result)
  {
    // not applicable because we are not an exit or don't have an endpoint auth policy
    if ((not m_state->m_ExitEnabled) or m_AuthPolicy == nullptr)
      return;
    ProtocolFrame f{};
    f.R = AuthResultCodeAsInt(result.code);
    f.T = tag;
    f.F = path->intro.pathID;
    f.N.Randomize();
    if (result.code == AuthResultCode::eAuthAccepted)
    {
      ProtocolMessage msg;

      std::vector<byte_t> reason{};
      reason.resize(result.reason.size());
      std::copy_n(result.reason.c_str(), reason.size(), reason.data());
      msg.PutBuffer(reason);
      if (m_AuthPolicy)
        msg.proto = ProtocolType::Auth;
      else
        msg.proto = ProtocolType::Control;

      if (not GetReplyIntroFor(tag, msg.introReply))
      {
        LogError("failed to send auth reply: no reply intro");
        return;
      }
      msg.sender = m_Identity.pub;
      SharedSecret sessionKey{};
      if (not GetCachedSessionKeyFor(tag, sessionKey))
      {
        LogError("failed to send auth reply: no cached session key");
        return;
      }
      if (not f.EncryptAndSign(msg, sessionKey, m_Identity))
      {
        LogError("failed to encrypt and sign auth reply");
        return;
      }
    }
    else
    {
      if (not f.Sign(m_Identity))
      {
        LogError("failed to sign auth reply result");
        return;
      }
    }
    m_SendQueue.tryPushBack(
        SendEvent_t{std::make_shared<routing::PathTransferMessage>(f, replyPath), path});
  }
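
  // Frame field glossary for the reply built above (inferred from usage, not
  // authoritative): R carries the auth result code, T the convo tag, F the
  // path ID we reply from, and N a random nonce; accepted results also carry
  // the human-readable reason string encrypted in the payload.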

  void
  Endpoint::RemoveConvoTag(const ConvoTag& t)
  {
    Sessions().erase(t);
  }

  void
  Endpoint::ResetConvoTag(ConvoTag tag, path::Path_ptr p, PathID_t from)
  {
    // send reset convo tag message
    ProtocolFrame f{};
    f.R = 1;
    f.T = tag;
    f.F = p->intro.pathID;
    f.Sign(m_Identity);
    {
      LogWarn("invalidating convotag T=", tag);
      RemoveConvoTag(tag);
      m_SendQueue.tryPushBack(
          SendEvent_t{std::make_shared<routing::PathTransferMessage>(f, from), p});
    }
  }

  bool
  Endpoint::HandleHiddenServiceFrame(path::Path_ptr p, const ProtocolFrame& frame)
  {
    if (frame.R)
    {
      // handle discard
      ServiceInfo si;
      if (!GetSenderFor(frame.T, si))
        return false;
      // verify source
      if (!frame.Verify(si))
        return false;
      // remove convotag, it doesn't exist
      LogWarn("remove convotag T=", frame.T, " R=", frame.R, " from ", si.Addr());
      RemoveConvoTag(frame.T);
      return true;
    }
    if (not frame.AsyncDecryptAndVerify(Router()->loop(), p, m_Identity, this))
    {
      ResetConvoTag(frame.T, p, frame.F);
    }
    return true;
  }
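
  // Commentary (assumption): a frame with R set is an out-of-band "this convo
  // tag is dead" signal, so we verify it really came from the claimed sender
  // before dropping state; conversely, when we cannot decrypt a frame we send
  // our own reset via ResetConvoTag() so both sides can resynchronize.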

  void
  Endpoint::HandlePathDied(path::Path_ptr p)
  {
    m_router->routerProfiling().MarkPathTimeout(p.get());
    ManualRebuild(1);
    path::Builder::HandlePathDied(p);
    RegenAndPublishIntroSet();
  }

  bool
  Endpoint::CheckPathIsDead(path::Path_ptr, llarp_time_t dlt)
  {
    return dlt > path::alive_timeout;
  }

  bool
  Endpoint::OnLookup(
      const Address& addr,
      std::optional<IntroSet> introset,
      const RouterID& endpoint,
      llarp_time_t timeLeft,
      uint64_t relayOrder)
  {
    // tell all our existing remote sessions about this introset update

    const auto now = Router()->Now();
    auto& lookups = m_state->m_PendingServiceLookups;
    if (introset)
    {
      auto& sessions = m_state->m_RemoteSessions;
      auto range = sessions.equal_range(addr);
      auto itr = range.first;
      while (itr != range.second)
      {
        itr->second->OnIntroSetUpdate(addr, introset, endpoint, timeLeft, relayOrder);
        // we got a successful lookup
        if (itr->second->ReadyToSend() and not introset->IsExpired(now))
        {
          // inform all lookups
          auto lookup_range = lookups.equal_range(addr);
          auto i = lookup_range.first;
          while (i != lookup_range.second)
          {
            i->second(addr, itr->second.get());
            ++i;
          }
          lookups.erase(addr);
        }
        ++itr;
      }
    }
    auto& fails = m_state->m_ServiceLookupFails;
    if (not introset or introset->IsExpired(now))
    {
      LogError(
          Name(),
          " failed to lookup ",
          addr.ToString(),
          " from ",
          endpoint,
          " order=",
          relayOrder);
      fails[endpoint] = fails[endpoint] + 1;

      const auto pendingForAddr = std::count_if(
          m_state->m_PendingLookups.begin(),
          m_state->m_PendingLookups.end(),
          [addr](const auto& item) -> bool { return item.second->IsFor(addr); });

      // inform all callbacks if we have no more pending lookups for this address
      if (pendingForAddr == 0)
      {
        auto range = lookups.equal_range(addr);
        auto itr = range.first;
        while (itr != range.second)
        {
          itr->second(addr, nullptr);
          itr = lookups.erase(itr);
        }
      }
      return false;
    }
    // check for established outbound context

    if (m_state->m_RemoteSessions.count(addr) > 0)
      return true;

    PutNewOutboundContext(*introset, timeLeft);
    return true;
  }
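
  // Outcome summary for OnLookup (added commentary): a fresh introset updates
  // every existing session and fires the pending callbacks once a session is
  // ready to send; a missing or expired introset counts a failure against the
  // queried endpoint and only fails the callbacks after the last outstanding
  // lookup for that address has come back empty.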

  void
  Endpoint::MarkAddressOutbound(AddressVariant_t addr)
  {
    if (auto* ptr = std::get_if<Address>(&addr))
      m_state->m_OutboundSessions.insert(*ptr);
  }

  bool
  Endpoint::WantsOutboundSession(const Address& addr) const
  {
    return m_state->m_OutboundSessions.count(addr) > 0;
  }

  void
  Endpoint::InformPathToService(const Address remote, OutboundContext* ctx)
  {
    auto& serviceLookups = m_state->m_PendingServiceLookups;
    auto range = serviceLookups.equal_range(remote);
    auto itr = range.first;
    while (itr != range.second)
    {
      itr->second(remote, ctx);
      ++itr;
    }
    serviceLookups.erase(remote);
  }

  bool
  Endpoint::EnsurePathToService(const Address remote, PathEnsureHook hook, llarp_time_t timeout)
  {
    if (not WantsOutboundSession(remote))
    {
      // we don't want to ensure paths to addresses that are inbound
      // inform failure right away in that case
      hook(remote, nullptr);
      return false;
    }

    /// how many routers to use for lookups
    static constexpr size_t NumParallelLookups = 2;
    /// how many requests per router
    static constexpr size_t RequestsPerLookup = 2;

    // add response hook to list for address.
    m_state->m_PendingServiceLookups.emplace(remote, hook);

    auto& sessions = m_state->m_RemoteSessions;
    {
      auto range = sessions.equal_range(remote);
      auto itr = range.first;
      while (itr != range.second)
      {
        if (itr->second->ReadyToSend())
        {
          InformPathToService(remote, itr->second.get());
          return true;
        }
        ++itr;
      }
    }
    /// check replay filter
    if (not m_IntrosetLookupFilter.Insert(remote))
      return true;

    const auto paths = GetManyPathsWithUniqueEndpoints(this, NumParallelLookups);

    using namespace std::placeholders;
    const dht::Key_t location = remote.ToKey();
    uint64_t order = 0;

    // flag to only add callback to list of callbacks for
    // address once.
    bool hookAdded = false;

    for (const auto& path : paths)
    {
      for (size_t count = 0; count < RequestsPerLookup; ++count)
      {
        HiddenServiceAddressLookup* job = new HiddenServiceAddressLookup(
            this,
            [this](auto addr, auto result, auto from, auto left, auto order) {
              return OnLookup(addr, result, from, left, order);
            },
            location,
            PubKey{remote.as_array()},
            path->Endpoint(),
            order,
            GenTXID(),
            timeout + (2 * path->intro.latency) + IntrosetLookupGraceInterval);
        LogInfo(
            "doing lookup for ",
            remote,
            " via ",
            path->Endpoint(),
            " at ",
            location,
            " order=",
            order);
        order++;
        if (job->SendRequestViaPath(path, Router()))
        {
          hookAdded = true;
        }
        else
          LogError(Name(), " send via path failed for lookup");
      }
    }
    return hookAdded;
  }
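
  // Commentary (derived from the constants above): each ensure fans out to
  // NumParallelLookups distinct path endpoints with RequestsPerLookup requests
  // each, i.e. up to 4 concurrent DHT lookups per address, while
  // m_IntrosetLookupFilter suppresses duplicate fan-outs for the same remote.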

  void
  Endpoint::SRVRecordsChanged()
  {
    auto& introset = introSet();
    introset.SRVs.clear();
    for (const auto& srv : SRVRecords())
      introset.SRVs.emplace_back(srv.toTuple());

    RegenAndPublishIntroSet();
  }

  bool
  Endpoint::EnsurePathToSNode(const RouterID snode, SNodeEnsureHook h)
  {
    auto& nodeSessions = m_state->m_SNodeSessions;

    using namespace std::placeholders;
    if (nodeSessions.count(snode) == 0)
    {
      const auto src = xhtonl(net::TruncateV6(GetIfAddr()));
      const auto dst = xhtonl(net::TruncateV6(ObtainIPForAddr(snode)));

      auto session = std::make_shared<exit::SNodeSession>(
          snode,
          [=](const llarp_buffer_t& buf) -> bool {
            net::IPPacket pkt;
            if (not pkt.Load(buf))
              return false;
            pkt.UpdateIPv4Address(src, dst);
            /// TODO: V6
            auto itr = m_state->m_SNodeSessions.find(snode);
            if (itr == m_state->m_SNodeSessions.end())
              return false;
            if (const auto maybe = itr->second->CurrentPath())
              return HandleInboundPacket(
                  ConvoTag{maybe->as_array()}, pkt.ConstBuffer(), ProtocolType::TrafficV4, 0);
            return false;
          },
          Router(),
          1,
          numHops,
          false,
          this);
      m_state->m_SNodeSessions[snode] = session;
    }
    EnsureRouterIsKnown(snode);
    auto range = nodeSessions.equal_range(snode);
    auto itr = range.first;
    while (itr != range.second)
    {
      if (itr->second->IsReady())
        h(snode, itr->second, ConvoTag{itr->second->CurrentPath()->as_array()});
      else
      {
        itr->second->AddReadyHook([h, snode](auto session) {
          if (session)
          {
            h(snode, session, ConvoTag{session->CurrentPath()->as_array()});
          }
          else
          {
            h(snode, nullptr, ConvoTag{});
          }
        });
        if (not itr->second->BuildCooldownHit(Now()))
          itr->second->BuildOne();
      }
      ++itr;
    }
    return true;
  }
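
  // Commentary (assumption): snode sessions have no service-layer handshake,
  // so the ConvoTag handed to the hook is synthesized from the session's
  // current path ID rather than negotiated with the remote.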

  bool
  Endpoint::SendToOrQueue(ConvoTag tag, const llarp_buffer_t& pkt, ProtocolType t)
  {
    if (tag.IsZero())
    {
      LogWarn("SendToOrQueue failed: convo tag is zero");
      return false;
    }
    LogDebug(Name(), " send ", pkt.sz, " bytes on T=", tag);
    if (auto maybe = GetEndpointWithConvoTag(tag))
    {
      if (auto* ptr = std::get_if<Address>(&*maybe))
      {
        if (*ptr == m_Identity.pub.Addr())
        {
          ConvoTagTX(tag);
          m_state->m_Router->TriggerPump();
          if (not HandleInboundPacket(tag, pkt, t, 0))
            return false;
          ConvoTagRX(tag);
          return true;
        }
      }
      if (not SendToOrQueue(*maybe, pkt, t))
        return false;
      return true;
    }
    LogDebug("SendToOrQueue failed: no endpoint for convo tag ", tag);
    return false;
  }

  bool
  Endpoint::SendToOrQueue(const RouterID& addr, const llarp_buffer_t& buf, ProtocolType t)
  {
    LogTrace("SendToOrQueue: sending to snode ", addr);
    auto pkt = std::make_shared<net::IPPacket>();
    if (!pkt->Load(buf))
      return false;
    EnsurePathToSNode(
        addr, [this, t, pkt = std::move(pkt)](RouterID, exit::BaseSession_ptr s, ConvoTag) {
          if (s)
          {
            s->SendPacketToRemote(pkt->ConstBuffer(), t);
            Router()->TriggerPump();
          }
        });
    return true;
  }

  void
  Endpoint::Pump(llarp_time_t now)
  {
    FlushRecvData();
    // send downstream packets to user for snode
    for (const auto& [router, session] : m_state->m_SNodeSessions)
      session->FlushDownstream();

    // handle inbound traffic in sorted order
    util::ascending_priority_queue<ProtocolMessage> queue;
    while (not m_InboundTrafficQueue.empty())
    {
      // succ it out
      queue.emplace(std::move(*m_InboundTrafficQueue.popFront()));
    }
    while (not queue.empty())
    {
      const auto& msg = queue.top();
      LogDebug(
          Name(),
          " handle inbound packet on ",
          msg.tag,
          " ",
          msg.payload.size(),
          " bytes seqno=",
          msg.seqno);
      if (HandleInboundPacket(msg.tag, msg.payload, msg.proto, msg.seqno))
      {
        ConvoTagRX(msg.tag);
      }
      else
      {
        LogWarn("Failed to handle inbound message");
      }
      queue.pop();
    }

    auto router = Router();
    // TODO: locking on this container
    for (const auto& [addr, outctx] : m_state->m_RemoteSessions)
    {
      outctx->FlushUpstream();
      outctx->Pump(now);
    }
    // TODO: locking on this container
    for (const auto& [router, session] : m_state->m_SNodeSessions)
      session->FlushUpstream();

    // send queue flush
    while (not m_SendQueue.empty())
    {
      SendEvent_t item = m_SendQueue.popFront();
      item.first->S = item.second->NextSeqNo();
      if (item.second->SendRoutingMessage(*item.first, router))
        ConvoTagTX(item.first->T.T);
    }

    UpstreamFlush(router);
  }
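
  // Pump ordering (added commentary): receive-side work is flushed first,
  // then queued inbound traffic is delivered in seqno order via the priority
  // queue, then per-session upstream buffers, and finally the routing-message
  // send queue, with sequence numbers assigned at send time so they match the
  // order of transmission on the path.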

  std::optional<ConvoTag>
  Endpoint::GetBestConvoTagFor(std::variant<Address, RouterID> remote) const
  {
    // get convotag with lowest estimated RTT
    if (auto ptr = std::get_if<Address>(&remote))
    {
      llarp_time_t rtt = 30s;
      std::optional<ConvoTag> ret = std::nullopt;
      for (const auto& [tag, session] : Sessions())
      {
        if (tag.IsZero())
          continue;
        if (session.remote.Addr() == *ptr)
        {
          if (*ptr == m_Identity.pub.Addr())
          {
            return tag;
          }
          if (session.inbound)
          {
            auto path = GetPathByRouter(session.replyIntro.router);
            // if we have no path to the remote router that's fine; still use
            // this tag in case it is the ONLY one we have
            if (path == nullptr)
            {
              ret = tag;
              continue;
            }

            if (path and path->IsReady())
            {
              const auto rttEstimate = (session.replyIntro.latency + path->intro.latency) * 2;
              if (rttEstimate < rtt)
              {
                ret = tag;
                rtt = rttEstimate;
              }
            }
          }
          else
          {
            auto range = m_state->m_RemoteSessions.equal_range(*ptr);
            auto itr = range.first;
            while (itr != range.second)
            {
              if (itr->second->ReadyToSend() and itr->second->estimatedRTT > 0s)
              {
                if (itr->second->estimatedRTT < rtt)
                {
                  ret = tag;
                  rtt = itr->second->estimatedRTT;
                }
              }
              itr++;
            }
          }
        }
      }
      return ret;
    }
    if (auto* ptr = std::get_if<RouterID>(&remote))
    {
      auto itr = m_state->m_SNodeSessions.find(*ptr);
      if (itr == m_state->m_SNodeSessions.end())
        return std::nullopt;
      if (auto maybe = itr->second->CurrentPath())
        return ConvoTag{maybe->as_array()};
    }
    return std::nullopt;
  }
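
  // Selection heuristic (added commentary): 30s acts as the worst acceptable
  // RTT; inbound tags are scored by reply-intro plus path latency, doubled
  // for the round trip, and outbound tags by the context's measured
  // estimatedRTT, with loopback short-circuiting to the first matching tag.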

  bool
  Endpoint::EnsurePathTo(
      std::variant<Address, RouterID> addr,
      std::function<void(std::optional<ConvoTag>)> hook,
      llarp_time_t timeout)
  {
    if (auto ptr = std::get_if<Address>(&addr))
    {
      if (*ptr == m_Identity.pub.Addr())
      {
        ConvoTag tag{};

        if (auto maybe = GetBestConvoTagFor(*ptr))
          tag = *maybe;
        else
          tag.Randomize();
        PutSenderFor(tag, m_Identity.pub, true);
        ConvoTagTX(tag);
        Sessions()[tag].forever = true;
        Loop()->call_soon([tag, hook]() { hook(tag); });
        return true;
      }
      if (not WantsOutboundSession(*ptr))
      {
        // we don't want to connect back to inbound sessions
        hook(std::nullopt);
        return true;
      }

      return EnsurePathToService(
          *ptr,
          [hook](auto, auto* ctx) {
            if (ctx)
            {
              hook(ctx->currentConvoTag);
            }
            else
            {
              hook(std::nullopt);
            }
          },
          timeout);
    }
    if (auto ptr = std::get_if<RouterID>(&addr))
    {
      return EnsurePathToSNode(*ptr, [hook](auto, auto session, auto tag) {
        if (session)
        {
          hook(tag);
        }
        else
        {
          hook(std::nullopt);
        }
      });
    }
    return false;
  }
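
  // Usage sketch (hypothetical caller, assuming the callback signature
  // declared above):
  //
  //   ep->EnsurePathTo(
  //       addr,
  //       [](std::optional<ConvoTag> maybeTag) {
  //         if (maybeTag)
  //           LogInfo("can talk on T=", *maybeTag);
  //         else
  //           LogWarn("no way to reach remote");
  //       },
  //       5s);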

  bool
  Endpoint::SendToOrQueue(const Address& remote, const llarp_buffer_t& data, ProtocolType t)
  {
    LogTrace("SendToOrQueue: sending to address ", remote);
    if (data.sz == 0)
    {
      LogTrace("SendToOrQueue: dropping because data.sz == 0");
      return false;
    }
    if (HasInboundConvo(remote))
    {
      // inbound conversation
      LogTrace("Have inbound convo");
      auto transfer = std::make_shared<routing::PathTransferMessage>();
      ProtocolFrame& f = transfer->T;
      f.R = 0;
      std::shared_ptr<path::Path> p;
      if (const auto maybe = GetBestConvoTagFor(remote))
      {
        // the remote guy's intro
        Introduction replyIntro;
        SharedSecret K;
        const auto tag = *maybe;

        if (not GetCachedSessionKeyFor(tag, K))
        {
          LogError(Name(), " no cached key for inbound session from ", remote, " T=", tag);
          return false;
        }
        if (not GetReplyIntroFor(tag, replyIntro))
        {
          LogError(Name(), " no reply intro for inbound session from ", remote, " T=", tag);
          return false;
        }
        // get path for intro
        auto p = GetPathByRouter(replyIntro.router);

        if (not p)
        {
          LogWarn(
              Name(),
              " has no path for intro router ",
              RouterID{replyIntro.router},
              " for inbound convo T=",
              tag);
          return false;
        }

        f.T = tag;
        // TODO: check expiration of our end
        auto m = std::make_shared<ProtocolMessage>(f.T);
        m->PutBuffer(data);
        f.N.Randomize();
        f.C.Zero();
        f.R = 0;
        transfer->Y.Randomize();
        m->proto = t;
        m->introReply = p->intro;
        m->sender = m_Identity.pub;
        if (auto maybe = GetSeqNoForConvo(f.T))
        {
          m->seqno = *maybe;
        }
        else
        {
          LogWarn(Name(), " could not set sequence number, no session T=", f.T);
          return false;
        }
        f.S = m->seqno;
        f.F = p->intro.pathID;
        transfer->P = replyIntro.pathID;
        Router()->QueueWork([transfer, p, m, K, this]() {
          if (not transfer->T.EncryptAndSign(*m, K, m_Identity))
          {
            LogError("failed to encrypt and sign for session T=", transfer->T.T);
            return;
          }
          m_SendQueue.tryPushBack(SendEvent_t{transfer, p});
          Router()->TriggerPump();
        });
        return true;
      }
      else
      {
        LogWarn(
            Name(),
            " SendToOrQueue on inbound convo from ",
            remote,
            " but get-best returned none; bug?");
      }
    }
    if (not WantsOutboundSession(remote))
    {
      LogWarn(
          Name(),
          " SendToOrQueue on outbound session we did not mark as outbound (remote=",
          remote,
          ")");
      return false;
    }

    // Failed to find a suitable inbound convo, look for outbound
    LogTrace("Not an inbound convo");
    auto& sessions = m_state->m_RemoteSessions;
    auto range = sessions.equal_range(remote);
    for (auto itr = range.first; itr != range.second; ++itr)
    {
      if (itr->second->ReadyToSend())
      {
        LogTrace("Found an outbound session to use to reach ", remote);
        itr->second->AsyncEncryptAndSendTo(data, t);
        return true;
      }
    }
    LogTrace("Making an outbound session and queuing the data");
    // add pending traffic
    auto& traffic = m_state->m_PendingTraffic;
    traffic[remote].emplace_back(data, t);
    EnsurePathToService(
        remote,
        [this](Address addr, OutboundContext* ctx) {
          if (ctx)
          {
            for (auto& pending : m_state->m_PendingTraffic[addr])
            {
              ctx->AsyncEncryptAndSendTo(pending.Buffer(), pending.protocol);
            }
          }
          else
          {
            LogWarn("no path made to ", addr);
          }
          m_state->m_PendingTraffic.erase(addr);
        },
        PathAlignmentTimeout());
    return true;
  }
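
  // Commentary (added): traffic for a remote we have no ready session with is
  // parked in m_PendingTraffic and replayed by the EnsurePathToService
  // callback once an OutboundContext becomes ready; on failure the pending
  // buffers for that address are simply dropped.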

  bool
  Endpoint::SendToOrQueue(
      const std::variant<Address, RouterID>& addr, const llarp_buffer_t& data, ProtocolType t)
  {
    return var::visit([&](auto& addr) { return SendToOrQueue(addr, data, t); }, addr);
  }

  bool
  Endpoint::HasConvoTag(const ConvoTag& t) const
  {
    return Sessions().find(t) != Sessions().end();
  }

  std::optional<uint64_t>
  Endpoint::GetSeqNoForConvo(const ConvoTag& tag)
  {
    auto itr = Sessions().find(tag);
    if (itr == Sessions().end())
      return std::nullopt;
    return itr->second.seqno++;
  }

  bool
  Endpoint::ShouldBuildMore(llarp_time_t now) const
  {
    if (BuildCooldownHit(now))
      return false;
    const auto requiredPaths = std::max(numDesiredPaths, path::min_intro_paths);
    if (NumInStatus(path::ePathBuilding) >= requiredPaths)
      return false;
    return NumPathsExistingAt(now + (path::default_lifetime - path::intro_path_spread))
        < requiredPaths;
  }
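
  // Commentary (added, inferred from the predicate above): the endpoint aims
  // to keep at least max(numDesiredPaths, path::min_intro_paths) paths alive
  // and starts replacements intro_path_spread before the old paths expire, so
  // the intro set should not go empty while new builds are in flight.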

  AbstractRouter*
  Endpoint::Router()
  {
    return m_state->m_Router;
  }

  const EventLoop_ptr&
  Endpoint::Loop()
  {
    return Router()->loop();
  }

  void
  Endpoint::BlacklistSNode(const RouterID snode)
  {
    m_state->m_SnodeBlacklist.insert(snode);
  }

  const std::set<RouterID>&
  Endpoint::SnodeBlacklist() const
  {
    return m_state->m_SnodeBlacklist;
  }

  const IntroSet&
  Endpoint::introSet() const
  {
    return m_state->m_IntroSet;
  }

  IntroSet&
  Endpoint::introSet()
  {
    return m_state->m_IntroSet;
  }

  const ConvoMap&
  Endpoint::Sessions() const
  {
    return m_state->m_Sessions;
  }

  ConvoMap&
  Endpoint::Sessions()
  {
    return m_state->m_Sessions;
  }

  void
  Endpoint::SetAuthInfoForEndpoint(Address addr, AuthInfo info)
  {
    m_RemoteAuthInfos[addr] = std::move(info);
  }

  void
  Endpoint::MapExitRange(IPRange range, Address exit)
  {
    if (not exit.IsZero())
      LogInfo(Name(), " map ", range, " to exit at ", exit);
    m_ExitMap.Insert(range, exit);
  }

  bool
  Endpoint::HasFlowToService(Address addr) const
  {
    return HasOutboundConvo(addr) or HasInboundConvo(addr);
  }

  void
  Endpoint::UnmapExitRange(IPRange range)
  {
    // unmap all ranges that fit in the range we gave
    m_ExitMap.RemoveIf([&](const auto& item) -> bool {
      if (not range.Contains(item.first))
        return false;
      LogInfo(Name(), " unmap ", item.first, " exit range mapping");
      return true;
    });
  }

  std::optional<AuthInfo>
  Endpoint::MaybeGetAuthInfoForEndpoint(Address remote)
  {
    const auto itr = m_RemoteAuthInfos.find(remote);
    if (itr == m_RemoteAuthInfos.end())
      return std::nullopt;
    return itr->second;
  }

  quic::TunnelManager*
  Endpoint::GetQUICTunnel()
  {
    return m_quic.get();
  }

  }  // namespace service
}  // namespace llarp