#include <chrono>
#include <memory>
#include <service/endpoint.hpp>

#include <dht/context.hpp>
#include <dht/key.hpp>
#include <dht/messages/findintro.hpp>
#include <dht/messages/findrouter.hpp>
#include <dht/messages/gotintro.hpp>
#include <dht/messages/gotrouter.hpp>
#include <dht/messages/pubintro.hpp>
#include <nodedb.hpp>
#include <profiling.hpp>
#include <router/abstractrouter.hpp>
#include <routing/dht_message.hpp>
#include <routing/path_transfer_message.hpp>
#include <service/endpoint_state.hpp>
#include <service/endpoint_util.hpp>
#include <service/hidden_service_address_lookup.hpp>
#include <service/outbound_context.hpp>
#include <service/protocol.hpp>
#include <util/thread/logic.hpp>
#include <util/str.hpp>
#include <util/buffer.hpp>
#include <util/meta/memfn.hpp>
#include <hook/shell.hpp>
#include <link/link_manager.hpp>
#include <tooling/dht_event.hpp>

#include <utility>

namespace llarp
{
  namespace service
  {
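    // Endpoint implements the client side of a hidden service: it builds
    // onion paths via the path::Builder base, maintains the introset that
    // advertises how to reach us, and tracks per-conversation session state.
    // The receive queue holds up to 128 events and is drained on the logic
    // thread.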
    Endpoint::Endpoint(AbstractRouter* r, Context* parent)
        : path::Builder(r, 3, path::default_len), context(parent), m_RecvQueue(128)
    {
      m_state = std::make_unique<EndpointState>();
      m_state->m_Router = r;
      m_state->m_Name = "endpoint";
      m_RecvQueue.enable();
    }

    bool
    Endpoint::Configure(const NetworkConfig& conf, [[maybe_unused]] const DnsConfig& dnsConf)
    {
      if (conf.m_Paths.has_value())
        numPaths = *conf.m_Paths;

      if (conf.m_Hops.has_value())
        numHops = *conf.m_Hops;

      return m_state->Configure(conf);
    }

    llarp_ev_loop_ptr
    Endpoint::EndpointNetLoop()
    {
      if (m_state->m_IsolatedNetLoop)
        return m_state->m_IsolatedNetLoop;

      return Router()->netloop();
    }

    bool
    Endpoint::NetworkIsIsolated() const
    {
      return m_state->m_IsolatedLogic.get() != nullptr && m_state->m_IsolatedNetLoop != nullptr;
    }

    bool
    Endpoint::HasPendingPathToService(const Address& addr) const
    {
      return m_state->m_PendingServiceLookups.find(addr) != m_state->m_PendingServiceLookups.end();
    }

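    // Rebuild our introset from the introductions of currently established
    // paths, skipping any that expire soon. If we cannot gather enough usable
    // introductions we trigger a path rebuild and bail; otherwise we encrypt,
    // sign, and publish the new introset to the DHT.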
    void
    Endpoint::RegenAndPublishIntroSet(bool forceRebuild)
    {
      const auto now = llarp::time_now_ms();
      std::set<Introduction> introset;
      if (!GetCurrentIntroductionsWithFilter(
              introset, [now](const service::Introduction& intro) -> bool {
                return not intro.ExpiresSoon(now, path::min_intro_lifetime);
              }))
      {
        LogWarn(
            "could not publish descriptors for endpoint ",
            Name(),
            " because we couldn't get enough valid introductions");
        if (ShouldBuildMore(now) || forceRebuild)
          ManualRebuild(1);
        return;
      }
      introSet().I.clear();
      for (auto& intro : introset)
      {
        introSet().I.emplace_back(std::move(intro));
      }
      if (introSet().I.size() == 0)
      {
        LogWarn("not enough intros to publish introset for ", Name());
        if (ShouldBuildMore(now) || forceRebuild)
          ManualRebuild(1);
        return;
      }
      auto maybe = m_Identity.EncryptAndSignIntroSet(introSet(), now);
      if (not maybe)
      {
        LogWarn("failed to generate introset for endpoint ", Name());
        return;
      }
      if (PublishIntroSet(*maybe, Router()))
      {
        LogInfo("(re)publishing introset for endpoint ", Name());
      }
      else
      {
        LogWarn("failed to publish intro set for endpoint ", Name());
      }
    }

    bool
    Endpoint::IsReady() const
    {
      const auto now = Now();
      if (introSet().I.size() == 0)
        return false;
      if (introSet().IsExpired(now))
        return false;
      return true;
    }

    bool
    Endpoint::HasPendingRouterLookup(const RouterID remote) const
    {
      const auto& routers = m_state->m_PendingRouters;
      return routers.find(remote) != routers.end();
    }

    bool
    Endpoint::GetEndpointWithConvoTag(
        const ConvoTag tag, llarp::AlignedBuffer<32>& addr, bool& snode) const
    {
      auto itr = Sessions().find(tag);
      if (itr != Sessions().end())
      {
        snode = false;
        addr = itr->second.remote.Addr();
        return true;
      }

      for (const auto& item : m_state->m_SNodeSessions)
      {
        if (item.second.second == tag)
        {
          snode = true;
          addr = item.first;
          return true;
        }
      }

      return false;
    }

    bool
    Endpoint::IntrosetIsStale() const
    {
      return introSet().HasExpiredIntros(Now());
    }

    util::StatusObject
    Endpoint::ExtractStatus() const
    {
      auto obj = path::Builder::ExtractStatus();
      obj["identity"] = m_Identity.pub.Addr().ToString();
      return m_state->ExtractStatus(obj);
    }

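    // Periodic housekeeping: republish descriptors when due, decay the
    // remote-lookup filter, then expire snode sessions, pending lookups,
    // pending router lookups, and stale convotags, deregister dead sessions,
    // and tick the remote sessions that are still alive.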
    void Endpoint::Tick(llarp_time_t)
    {
      const auto now = llarp::time_now_ms();
      path::Builder::Tick(now);
      // publish descriptors
      if (ShouldPublishDescriptors(now))
      {
        RegenAndPublishIntroSet();
      }

      m_state->m_RemoteLookupFilter.Decay(now);

      // expire snode sessions
      EndpointUtil::ExpireSNodeSessions(now, m_state->m_SNodeSessions);
      // expire pending tx
      EndpointUtil::ExpirePendingTx(now, m_state->m_PendingLookups);
      // expire pending router lookups
      EndpointUtil::ExpirePendingRouterLookups(now, m_state->m_PendingRouters);

      // deregister dead sessions
      EndpointUtil::DeregisterDeadSessions(now, m_state->m_DeadSessions);
      // tick remote sessions
      EndpointUtil::TickRemoteSessions(
          now, m_state->m_RemoteSessions, m_state->m_DeadSessions, Sessions());
      // expire convotags
      EndpointUtil::ExpireConvoSessions(now, Sessions());
    }

    bool
    Endpoint::Stop()
    {
      // stop remote sessions
      EndpointUtil::StopRemoteSessions(m_state->m_RemoteSessions);
      // stop snode sessions
      EndpointUtil::StopSnodeSessions(m_state->m_SNodeSessions);
      if (m_OnDown)
        m_OnDown->NotifyAsync(NotifyParams());
      return path::Builder::Stop();
    }

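    // Pick a transaction id for a new lookup: start from a random value and
    // probe linearly until we find one not already used by a pending lookup.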
    uint64_t
    Endpoint::GenTXID()
    {
      uint64_t txid = randint();
      const auto& lookups = m_state->m_PendingLookups;
      while (lookups.find(txid) != lookups.end())
        ++txid;
      return txid;
    }

    std::string
    Endpoint::Name() const
    {
      return m_state->m_Name + ":" + m_Identity.pub.Name();
    }

    void
    Endpoint::PutLookup(IServiceLookup* lookup, uint64_t txid)
    {
      m_state->m_PendingLookups.emplace(txid, std::unique_ptr<IServiceLookup>(lookup));
    }

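    // A GotIntroMessage answers one of our introset lookups: every introset
    // it carries must verify, the reply is matched to a pending lookup by
    // txid, and if HandleResponse() returns false the lookup is put back to
    // wait for further replies.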
    bool
    Endpoint::HandleGotIntroMessage(dht::GotIntroMessage_constptr msg)
    {
      std::set<EncryptedIntroSet> remote;
      for (const auto& introset : msg->found)
      {
        if (not introset.Verify(Now()))
        {
          LogError(Name(), " got invalid introset");
          return false;
        }
        remote.insert(introset);
      }
      auto& lookups = m_state->m_PendingLookups;
      auto itr = lookups.find(msg->txid);
      if (itr == lookups.end())
      {
        LogWarn(
            "invalid lookup response for hidden service endpoint ", Name(), " txid=", msg->txid);
        return true;
      }
      std::unique_ptr<IServiceLookup> lookup = std::move(itr->second);
      lookups.erase(itr);
      if (not lookup->HandleResponse(remote))
        lookups.emplace(msg->txid, std::move(lookup));
      return true;
    }

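    // The helpers below maintain per-ConvoTag session state: the sender's
    // ServiceInfo, the intro and reply intro in use, and the cached shared
    // session key. Mutations refresh lastUsed so that idle sessions can be
    // expired from Tick().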
    bool
    Endpoint::HasInboundConvo(const Address& addr) const
    {
      for (const auto& item : Sessions())
      {
        if (item.second.remote.Addr() == addr && item.second.inbound)
          return true;
      }
      return false;
    }

    void
    Endpoint::PutSenderFor(const ConvoTag& tag, const ServiceInfo& info, bool inbound)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        itr = Sessions().emplace(tag, Session{}).first;
        itr->second.inbound = inbound;
        itr->second.remote = info;
      }
      itr->second.lastUsed = Now();
    }

    bool
    Endpoint::GetSenderFor(const ConvoTag& tag, ServiceInfo& si) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      si = itr->second.remote;
      return true;
    }

    void
    Endpoint::PutIntroFor(const ConvoTag& tag, const Introduction& intro)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        return;
      }
      itr->second.intro = intro;
      itr->second.lastUsed = Now();
    }

    bool
    Endpoint::GetIntroFor(const ConvoTag& tag, Introduction& intro) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      intro = itr->second.intro;
      return true;
    }

    void
    Endpoint::PutReplyIntroFor(const ConvoTag& tag, const Introduction& intro)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        return;
      }
      itr->second.replyIntro = intro;
      itr->second.lastUsed = Now();
    }

    bool
    Endpoint::GetReplyIntroFor(const ConvoTag& tag, Introduction& intro) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      intro = itr->second.replyIntro;
      return true;
    }

    bool
    Endpoint::GetConvoTagsForService(const Address& addr, std::set<ConvoTag>& tags) const
    {
      return EndpointUtil::GetConvoTagsForService(Sessions(), addr, tags);
    }

    bool
    Endpoint::GetCachedSessionKeyFor(const ConvoTag& tag, SharedSecret& secret) const
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
        return false;
      secret = itr->second.sharedKey;
      return true;
    }

    void
    Endpoint::PutCachedSessionKeyFor(const ConvoTag& tag, const SharedSecret& k)
    {
      auto itr = Sessions().find(tag);
      if (itr == Sessions().end())
      {
        itr = Sessions().emplace(tag, Session{}).first;
      }
      itr->second.sharedKey = k;
      itr->second.lastUsed = Now();
    }

    void
    Endpoint::MarkConvoTagActive(const ConvoTag& tag)
    {
      auto itr = Sessions().find(tag);
      if (itr != Sessions().end())
      {
        itr->second.lastUsed = Now();
      }
    }

    bool
    Endpoint::LoadKeyFile()
    {
      const auto& keyfile = m_state->m_Keyfile;
      if (!keyfile.empty())
      {
        if (!m_Identity.EnsureKeys(keyfile, Router()->keyManager()->needBackup()))
        {
          LogError("Can't ensure keyfile [", keyfile, "]");
          return false;
        }
      }
      else
      {
        m_Identity.RegenerateKeys();
      }
      return true;
    }

    bool
    Endpoint::Start()
    {
      m_state->m_RemoteLookupFilter.DecayInterval(500ms);
      // how can I tell if m_Identity isn't loaded?
      if (!m_DataHandler)
      {
        m_DataHandler = this;
      }
      // this does network isolation
      while (m_state->m_OnInit.size())
      {
        if (m_state->m_OnInit.front()())
          m_state->m_OnInit.pop_front();
        else
        {
          LogWarn("Can't call init of network isolation");
          return false;
        }
      }
      return true;
    }

    Endpoint::~Endpoint()
    {
      if (m_OnUp)
        m_OnUp->Stop();
      if (m_OnDown)
        m_OnDown->Stop();
      if (m_OnReady)
        m_OnReady->Stop();
    }

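    // Publish the encrypted introset redundantly: pick
    // dht::IntroSetRelayRedundancy paths with unique endpoints and send
    // dht::IntroSetRequestsPerRelay publish requests down each one, counting
    // successes. The publish as a whole only succeeds if exactly
    // dht::IntroSetStorageRedundancy copies went out, which suggests storage
    // redundancy is the product of the other two constants.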
    bool
    Endpoint::PublishIntroSet(const EncryptedIntroSet& introset, AbstractRouter* r)
    {
      const auto paths = GetManyPathsWithUniqueEndpoints(this, llarp::dht::IntroSetRelayRedundancy);

      if (paths.size() != llarp::dht::IntroSetRelayRedundancy)
      {
        LogWarn(
            "Cannot publish intro set because we only have ",
            paths.size(),
            " paths, but need ",
            llarp::dht::IntroSetRelayRedundancy);
        return false;
      }

      // do publishing for each path selected
      size_t published = 0;

      for (const auto& path : paths)
      {
        for (size_t i = 0; i < llarp::dht::IntroSetRequestsPerRelay; ++i)
        {
          r->NotifyRouterEvent<tooling::PubIntroSentEvent>(
              r->pubkey(),
              llarp::dht::Key_t{introset.derivedSigningKey.as_array()},
              RouterID(path->hops[path->hops.size() - 1].rc.pubkey),
              published);
          if (PublishIntroSetVia(introset, r, path, published))
            published++;
        }
      }
      if (published != llarp::dht::IntroSetStorageRedundancy)
        LogWarn(
            "Publish introset failed: could only publish ",
            published,
            " copies but wanted ",
            llarp::dht::IntroSetStorageRedundancy);
      return published == llarp::dht::IntroSetStorageRedundancy;
    }

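    // PublishIntroSetJob wraps a single publish request in the IServiceLookup
    // interface so it can ride the normal lookup machinery: it builds a DHT
    // PublishIntroMessage for its relay order and reports the outcome back to
    // the Endpoint.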
    struct PublishIntroSetJob : public IServiceLookup
    {
      EncryptedIntroSet m_IntroSet;
      Endpoint* m_Endpoint;
      uint64_t m_relayOrder;
      PublishIntroSetJob(
          Endpoint* parent, uint64_t id, EncryptedIntroSet introset, uint64_t relayOrder)
          : IServiceLookup(parent, id, "PublishIntroSet")
          , m_IntroSet(std::move(introset))
          , m_Endpoint(parent)
          , m_relayOrder(relayOrder)
      {
      }

      std::shared_ptr<routing::IMessage>
      BuildRequestMessage() override
      {
        auto msg = std::make_shared<routing::DHTMessage>();
        msg->M.emplace_back(
            std::make_unique<dht::PublishIntroMessage>(m_IntroSet, txid, true, m_relayOrder));
        return msg;
      }

      bool
      HandleResponse(const std::set<EncryptedIntroSet>& response) override
      {
        if (not response.empty())
          m_Endpoint->IntroSetPublished();
        else
          m_Endpoint->IntroSetPublishFail();

        return true;
      }
    };

    void
    Endpoint::IntroSetPublishFail()
    {
      auto now = Now();
      if (ShouldPublishDescriptors(now))
      {
        RegenAndPublishIntroSet();
      }
      else if (NumInStatus(path::ePathEstablished) < 3)
      {
        if (introSet().HasExpiredIntros(now))
          ManualRebuild(1);
      }
    }

    bool
    Endpoint::PublishIntroSetVia(
        const EncryptedIntroSet& introset,
        AbstractRouter* r,
        path::Path_ptr path,
        uint64_t relayOrder)
    {
      auto job = new PublishIntroSetJob(this, GenTXID(), introset, relayOrder);
      if (job->SendRequestViaPath(path, r))
      {
        m_state->m_LastPublishAttempt = Now();
        return true;
      }
      return false;
    }

    void
    Endpoint::ResetInternalState()
    {
      path::Builder::ResetInternalState();
      static auto resetState = [](auto& container, auto getter) {
        std::for_each(container.begin(), container.end(), [getter](auto& item) {
          getter(item)->ResetInternalState();
        });
      };
      resetState(m_state->m_RemoteSessions, [](const auto& item) { return item.second; });
      resetState(m_state->m_SNodeSessions, [](const auto& item) { return item.second.first; });
    }

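    // Decide whether it is time to (re)publish: measure from the last publish
    // attempt, using the shorter retry interval while the current introset
    // still contains expired intros and the regular interval otherwise.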
    bool
    Endpoint::ShouldPublishDescriptors(llarp_time_t now) const
    {
      if (not m_PublishIntroSet)
        return false;

      auto next_pub = m_state->m_LastPublishAttempt
          + (m_state->m_IntroSet.HasExpiredIntros(now) ? INTROSET_PUBLISH_RETRY_INTERVAL
                                                       : INTROSET_PUBLISH_INTERVAL);

      return now >= next_pub;
    }

    void
    Endpoint::IntroSetPublished()
    {
      const auto now = Now();
      // We usually get 4 confirmations back (one for each DHT location), which
      // is noisy: suppress this log message if we already had a confirmation in
      // the last second.
      if (m_state->m_LastPublish < now - 1s)
        LogInfo(Name(), " IntroSet publish confirmed");
      else
        LogDebug(Name(), " Additional IntroSet publish confirmed");

      m_state->m_LastPublish = now;
      if (m_OnReady)
        m_OnReady->NotifyAsync(NotifyParams());
      m_OnReady = nullptr;
    }

    void
    Endpoint::IsolatedNetworkMainLoop()
    {
      m_state->m_IsolatedNetLoop = llarp_make_ev_loop();
      m_state->m_IsolatedLogic = std::make_shared<llarp::Logic>();
      if (SetupNetworking())
        llarp_ev_loop_run_single_process(m_state->m_IsolatedNetLoop, m_state->m_IsolatedLogic);
      else
      {
        m_state->m_IsolatedNetLoop.reset();
        m_state->m_IsolatedLogic.reset();
      }
    }

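    // Hop selection excludes routers already chosen for this path and
    // everything on the snode blacklist; for the terminal hop it also
    // excludes the endpoints of existing paths so our paths terminate at
    // diverse routers.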
    bool
    Endpoint::SelectHop(
        llarp_nodedb* db,
        const std::set<RouterID>& prev,
        RouterContact& cur,
        size_t hop,
        path::PathRole roles)
    {
      std::set<RouterID> exclude = prev;
      for (const auto& snode : SnodeBlacklist())
        exclude.insert(snode);
      if (hop == numHops - 1 and numHops > 1)
      {
        // diversify endpoints
        ForEachPath([&exclude](const path::Path_ptr& path) { exclude.insert(path->Endpoint()); });
      }
      return path::Builder::SelectHop(db, exclude, cur, hop, roles);
    }

    void
    Endpoint::PathBuildStarted(path::Path_ptr path)
    {
      path::Builder::PathBuildStarted(path);
    }

    void
    Endpoint::PutNewOutboundContext(const service::IntroSet& introset)
    {
      Address addr;
      introset.A.CalculateAddress(addr.as_array());

      auto& remoteSessions = m_state->m_RemoteSessions;
      auto& serviceLookups = m_state->m_PendingServiceLookups;

      if (remoteSessions.count(addr) >= MAX_OUTBOUND_CONTEXT_COUNT)
      {
        auto itr = remoteSessions.find(addr);

        auto range = serviceLookups.equal_range(addr);
        auto i = range.first;
        while (i != range.second)
        {
          i->second(addr, itr->second.get());
          ++i;
        }
        serviceLookups.erase(addr);
        return;
      }

      auto it = remoteSessions.emplace(addr, std::make_shared<OutboundContext>(introset, this));
      LogInfo("Created new outbound context for ", addr.ToString());

      // inform pending
      auto range = serviceLookups.equal_range(addr);
      auto itr = range.first;
      if (itr != range.second)
      {
        itr->second(addr, it->second.get());
        ++itr;
      }
      serviceLookups.erase(addr);
    }

    void
    Endpoint::HandleVerifyGotRouter(dht::GotRouterMessage_constptr msg, llarp_async_verify_rc* j)
    {
      auto& pendingRouters = m_state->m_PendingRouters;
      auto itr = pendingRouters.find(j->rc.pubkey);
      if (itr != pendingRouters.end())
      {
        if (j->valid)
          itr->second.InformResult(msg->foundRCs);
        else
          itr->second.InformResult({});
        pendingRouters.erase(itr);
      }
      delete j;
    }

    bool
    Endpoint::HandleGotRouterMessage(dht::GotRouterMessage_constptr msg)
    {
      if (not msg->foundRCs.empty())
      {
        for (const auto& rc : msg->foundRCs)
        {
          llarp_async_verify_rc* job = new llarp_async_verify_rc();
          job->nodedb = Router()->nodedb();
          job->cryptoworker = Router()->threadpool();
          job->diskworker = Router()->diskworker();
          job->logic = Router()->logic();
          job->hook = std::bind(&Endpoint::HandleVerifyGotRouter, this, msg, std::placeholders::_1);
          job->rc = rc;
          llarp_nodedb_async_verify(job);
        }
      }
      else
      {
        auto& routers = m_state->m_PendingRouters;
        auto itr = routers.begin();
        while (itr != routers.end())
        {
          if (itr->second.txid == msg->txid)
          {
            itr->second.InformResult({});
            itr = routers.erase(itr);
          }
          else
            ++itr;
        }
      }
      return true;
    }

    void
    Endpoint::EnsureRouterIsKnown(const RouterID& router)
    {
      if (router.IsZero())
        return;
      if (!Router()->nodedb()->Has(router))
      {
        LookupRouterAnon(router, nullptr);
      }
    }

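    // Look up a router anonymously: send a FindRouterMessage down the
    // established path whose endpoint is closest to the target, and record
    // the pending lookup keyed on the router id so the eventual
    // GotRouterMessage can be matched up.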
    bool
    Endpoint::LookupRouterAnon(RouterID router, RouterLookupHandler handler)
    {
      using llarp::dht::FindRouterMessage;

      auto& routers = m_state->m_PendingRouters;
      if (routers.find(router) == routers.end())
      {
        auto path = GetEstablishedPathClosestTo(router);
        routing::DHTMessage msg;
        auto txid = GenTXID();
        msg.M.emplace_back(std::make_unique<FindRouterMessage>(txid, router));

        if (path && path->SendRoutingMessage(msg, Router()))
        {
          RouterLookupJob job(this, handler);

          assert(msg.M.size() == 1);
          auto dhtMsg = dynamic_cast<FindRouterMessage*>(msg.M[0].get());
          assert(dhtMsg != nullptr);

          m_router->NotifyRouterEvent<tooling::FindRouterSentEvent>(m_router->pubkey(), *dhtMsg);

          routers.emplace(router, RouterLookupJob(this, handler));
          return true;
        }
      }
      return false;
    }

    void
    Endpoint::HandlePathBuilt(path::Path_ptr p)
    {
      p->SetDataHandler(util::memFn(&Endpoint::HandleHiddenServiceFrame, this));
      p->SetDropHandler(util::memFn(&Endpoint::HandleDataDrop, this));
      p->SetDeadChecker(util::memFn(&Endpoint::CheckPathIsDead, this));
      path::Builder::HandlePathBuilt(p);
    }

    bool
    Endpoint::HandleDataDrop(path::Path_ptr p, const PathID_t& dst, uint64_t seq)
    {
      LogWarn(Name(), " message ", seq, " dropped by endpoint ", p->Endpoint(), " via ", dst);
      return true;
    }

    std::unordered_map<std::string, std::string>
    Endpoint::NotifyParams() const
    {
      return {{"LOKINET_ADDR", m_Identity.pub.Addr().ToString()}};
    }

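    // Inbound protocol messages are staged in m_RecvQueue. QueueRecvData
    // schedules a flush on the logic thread whenever the queue is full or
    // empty, likely to make room under load and to kick off a drain cycle
    // when the first event arrives; FlushRecvData hands each queued event to
    // ProtocolMessage::ProcessAsync.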
    void
    Endpoint::FlushRecvData()
    {
      do
      {
        auto maybe = m_RecvQueue.tryPopFront();
        if (not maybe)
          return;
        auto ev = std::move(*maybe);
        ProtocolMessage::ProcessAsync(ev.fromPath, ev.pathid, ev.msg);
      } while (true);
    }

    void
    Endpoint::QueueRecvData(RecvDataEvent ev)
    {
      if (m_RecvQueue.full() || m_RecvQueue.empty())
      {
        auto self = this;
        LogicCall(m_router->logic(), [self]() { self->FlushRecvData(); });
      }
      m_RecvQueue.pushBack(std::move(ev));
    }

    bool
    Endpoint::HandleDataMessage(
        path::Path_ptr path, const PathID_t from, std::shared_ptr<ProtocolMessage> msg)
    {
      msg->sender.UpdateAddr();
      PutSenderFor(msg->tag, msg->sender, true);
      PutReplyIntroFor(msg->tag, path->intro);
      Introduction intro;
      intro.pathID = from;
      intro.router = PubKey(path->Endpoint());
      intro.expiresAt = std::min(path->ExpireTime(), msg->introReply.expiresAt);
      PutIntroFor(msg->tag, intro);
      return ProcessDataMessage(msg);
    }

    bool
    Endpoint::HasPathToSNode(const RouterID ident) const
    {
      auto range = m_state->m_SNodeSessions.equal_range(ident);
      auto itr = range.first;
      while (itr != range.second)
      {
        if (itr->second.first->IsReady())
        {
          return true;
        }
        ++itr;
      }
      return false;
    }

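    // Dispatch a decrypted protocol message by protocol number: exit traffic
    // (when exit is enabled or the sender is our exit node) and IPv4/IPv6
    // traffic are queued for the user under the inbound-queue mutex, while
    // control messages are currently accepted and ignored.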
    bool
    Endpoint::ProcessDataMessage(std::shared_ptr<ProtocolMessage> msg)
    {
      if ((msg->proto == eProtocolExit
           && (m_state->m_ExitEnabled || msg->sender.Addr() == m_state->m_ExitNode))
          || msg->proto == eProtocolTrafficV4 || msg->proto == eProtocolTrafficV6)
      {
        util::Lock l(m_state->m_InboundTrafficQueueMutex);
        m_state->m_InboundTrafficQueue.emplace(msg);
        return true;
      }
      if (msg->proto == eProtocolControl)
      {
        // TODO: implement me (?)
        // right now it's just random noise
        return true;
      }
      return false;
    }

    void
    Endpoint::RemoveConvoTag(const ConvoTag& t)
    {
      Sessions().erase(t);
    }

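    // A frame with the R flag set is a discard notice: verify it really came
    // from the sender we know for that tag, then drop the conversation.
    // Otherwise decrypt and verify asynchronously; if that fails we sign and
    // queue a discard frame of our own so the remote invalidates the convotag
    // as well.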
    bool
    Endpoint::HandleHiddenServiceFrame(path::Path_ptr p, const ProtocolFrame& frame)
    {
      if (frame.R)
      {
        // handle discard
        ServiceInfo si;
        if (!GetSenderFor(frame.T, si))
          return false;
        // verify source
        if (!frame.Verify(si))
          return false;
        // remove convotag as it doesn't exist
        LogWarn("remove convotag T=", frame.T);
        RemoveConvoTag(frame.T);
        return true;
      }
      if (!frame.AsyncDecryptAndVerify(
              EndpointLogic(), p, CryptoWorker(), m_Identity, m_DataHandler))
      {
        // send discard
        ProtocolFrame f;
        f.R = 1;
        f.T = frame.T;
        f.F = p->intro.pathID;

        if (!f.Sign(m_Identity))
          return false;
        {
          LogWarn("invalidating convotag T=", frame.T);
          util::Lock lock(m_state->m_SendQueueMutex);
          m_state->m_SendQueue.emplace_back(
              std::make_shared<const routing::PathTransferMessage>(f, frame.F), p);
        }
        return true;
      }
      return true;
    }

    void Endpoint::HandlePathDied(path::Path_ptr)
    {
      RegenAndPublishIntroSet(true);
    }

    bool
    Endpoint::CheckPathIsDead(path::Path_ptr, llarp_time_t dlt)
    {
      return dlt > path::alive_timeout;
    }

    bool
    Endpoint::OnLookup(
        const Address& addr, std::optional<IntroSet> introset, const RouterID& endpoint)
    {
      const auto now = Router()->Now();
      auto& fails = m_state->m_ServiceLookupFails;
      auto& lookups = m_state->m_PendingServiceLookups;
      if (not introset or introset->IsExpired(now))
      {
        LogError(Name(), " failed to lookup ", addr.ToString(), " from ", endpoint);
        fails[endpoint] = fails[endpoint] + 1;
        // inform one
        auto range = lookups.equal_range(addr);
        auto itr = range.first;
        if (itr != range.second)
        {
          itr->second(addr, nullptr);
          itr = lookups.erase(itr);
        }
        return false;
      }
      // check for established outbound context

      if (m_state->m_RemoteSessions.count(addr) > 0)
        return true;

      PutNewOutboundContext(*introset);
      return true;
    }

    void
    Endpoint::MarkAddressOutbound(const Address& addr)
    {
      m_state->m_OutboundSessions.insert(addr);
    }

    bool
    Endpoint::WantsOutboundSession(const Address& addr) const
    {
      return m_state->m_OutboundSessions.count(addr) > 0;
    }

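    // Ensure we can reach a remote hidden service: reuse an existing outbound
    // session when we have one, otherwise fan the introset lookup out over
    // NumParallelLookups paths with RequestsPerLookup requests each (2 x 2 =
    // 4 requests total), rate-limited per address by the remote lookup
    // filter.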
    bool
    Endpoint::EnsurePathToService(
        const Address remote, PathEnsureHook hook, llarp_time_t /*timeoutMS*/)
    {
      /// how many routers to use for lookups
      static constexpr size_t NumParallelLookups = 2;
      /// how many requests per router
      static constexpr size_t RequestsPerLookup = 2;

      MarkAddressOutbound(remote);

      auto& sessions = m_state->m_RemoteSessions;

      {
        auto itr = sessions.find(remote);
        if (itr != sessions.end())
        {
          hook(itr->first, itr->second.get());
          return true;
        }
      }

      // filter check for address
      if (not m_state->m_RemoteLookupFilter.Insert(remote))
        return false;

      auto& lookups = m_state->m_PendingServiceLookups;

      const auto paths = GetManyPathsWithUniqueEndpoints(this, NumParallelLookups);

      using namespace std::placeholders;
      size_t lookedUp = 0;
      const dht::Key_t location = remote.ToKey();
      uint64_t order = 0;
      for (const auto& path : paths)
      {
        for (size_t count = 0; count < RequestsPerLookup; ++count)
        {
          HiddenServiceAddressLookup* job = new HiddenServiceAddressLookup(
              this,
              util::memFn(&Endpoint::OnLookup, this),
              location,
              PubKey{remote.as_array()},
              order,
              GenTXID());
          LogInfo(
              "doing lookup for ",
              remote,
              " via ",
              path->Endpoint(),
              " at ",
              location,
              " order=",
              order);
          order++;
          if (job->SendRequestViaPath(path, Router()))
          {
            lookups.emplace(remote, hook);
            lookedUp++;
          }
          else
            LogError(Name(), " send via path failed for lookup");
        }
      }
      return lookedUp == (NumParallelLookups * RequestsPerLookup);
    }

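    // SNode sessions live in a multimap of router id to (session, convotag)
    // pairs, capped at 16 concurrent sessions. The hook fires immediately if
    // a session is already ready; otherwise it is registered to run once the
    // session finishes building a path.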
    bool
    Endpoint::EnsurePathToSNode(const RouterID snode, SNodeEnsureHook h)
    {
      static constexpr size_t MaxConcurrentSNodeSessions = 16;
      auto& nodeSessions = m_state->m_SNodeSessions;
      if (nodeSessions.size() >= MaxConcurrentSNodeSessions)
      {
        // a quick client-side workaround before we do proper limiting
        LogError(Name(), " has too many snode sessions");
        return false;
      }
      using namespace std::placeholders;
      if (nodeSessions.count(snode) == 0)
      {
        ConvoTag tag;
        // TODO: check for collision lol no we don't but maybe we will...
        // some day :DDDDD
        tag.Randomize();
        auto session = std::make_shared<exit::SNodeSession>(
            snode,
            [=](const llarp_buffer_t& pkt) -> bool {
              /// TODO: V6
              return HandleInboundPacket(tag, pkt, eProtocolTrafficV4);
            },
            Router(),
            numPaths,
            numHops,
            false,
            ShouldBundleRC());

        m_state->m_SNodeSessions.emplace(snode, std::make_pair(session, tag));
      }
      EnsureRouterIsKnown(snode);
      auto range = nodeSessions.equal_range(snode);
      auto itr = range.first;
      while (itr != range.second)
      {
        if (itr->second.first->IsReady())
          h(snode, itr->second.first);
        else
        {
          itr->second.first->AddReadyHook(std::bind(h, snode, _1));
          itr->second.first->BuildOne();
        }
        ++itr;
      }
      return true;
    }

    bool
    Endpoint::SendToSNodeOrQueue(const RouterID& addr, const llarp_buffer_t& buf)
    {
      auto pkt = std::make_shared<net::IPPacket>();
      if (!pkt->Load(buf))
        return false;
      EnsurePathToSNode(addr, [pkt](RouterID, exit::BaseSession_ptr s) {
        if (s)
          s->QueueUpstreamTraffic(*pkt, routing::ExitPadSize);
      });
      return true;
    }

    void Endpoint::Pump(llarp_time_t)
    {
      const auto& sessions = m_state->m_SNodeSessions;
      auto& queue = m_state->m_InboundTrafficQueue;

      auto epPump = [&]() {
        FlushRecvData();
        // send downstream packets to user for snode
        for (const auto& item : sessions)
          item.second.first->FlushDownstream();
        // send downstream traffic to user for hidden service
        util::Lock lock(m_state->m_InboundTrafficQueueMutex);
        while (not queue.empty())
        {
          const auto& msg = queue.top();
          const llarp_buffer_t buf(msg->payload);
          HandleInboundPacket(msg->tag, buf, msg->proto);
          queue.pop();
        }
      };

      if (NetworkIsIsolated())
      {
        LogicCall(EndpointLogic(), epPump);
      }
      else
      {
        epPump();
      }
      auto router = Router();
      // TODO: locking on this container
      for (const auto& item : m_state->m_RemoteSessions)
        item.second->FlushUpstream();
      // TODO: locking on this container
      for (const auto& item : sessions)
        item.second.first->FlushUpstream();
      {
|
2020-02-21 17:21:11 +00:00
|
|
|
util::Lock lock(m_state->m_SendQueueMutex);
|
2019-11-25 13:18:24 +00:00
|
|
|
// send outbound traffic
|
2020-04-07 18:38:56 +00:00
|
|
|
for (const auto& item : m_state->m_SendQueue)
|
2019-11-25 13:18:24 +00:00
|
|
|
{
|
|
|
|
item.second->SendRoutingMessage(*item.first, router);
|
|
|
|
MarkConvoTagActive(item.first->T.T);
|
|
|
|
}
|
|
|
|
m_state->m_SendQueue.clear();
|
2019-09-19 20:28:12 +00:00
|
|
|
}
|
2019-12-30 13:20:50 +00:00
|
|
|
UpstreamFlush(router);
|
2019-12-30 13:30:01 +00:00
|
|
|
router->linkManager().PumpLinks();
|
2019-04-25 17:15:56 +00:00
|
|
|
}
|
|
|
|
|
2019-07-01 14:56:56 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
Endpoint::EnsureConvo(
|
|
|
|
const AlignedBuffer<32> /*addr*/, bool snode, ConvoEventListener_ptr /*ev*/)
|
2019-07-01 14:56:56 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (snode)
|
2019-07-01 14:56:56 +00:00
|
|
|
{
|
|
|
|
}
|
2019-07-15 09:15:51 +00:00
|
|
|
|
|
|
|
// TODO: something meaningful
|
|
|
|
return false;
|
2019-07-01 14:56:56 +00:00
|
|
|
}
|
|
|
|
|
2018-08-22 15:52:10 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
Endpoint::SendToServiceOrQueue(
|
|
|
|
const service::Address& remote, const llarp_buffer_t& data, ProtocolType t)
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (data.sz == 0)
|
2019-11-29 00:37:58 +00:00
|
|
|
return false;
|
2018-09-28 12:22:50 +00:00
|
|
|
// inbound conversation
|
2019-07-18 16:28:17 +00:00
|
|
|
const auto now = Now();
|
2018-11-14 12:23:08 +00:00
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
if (HasInboundConvo(remote))
|
2018-09-14 13:43:42 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
auto transfer = std::make_shared<routing::PathTransferMessage>();
|
2019-06-14 12:49:45 +00:00
|
|
|
ProtocolFrame& f = transfer->T;
|
2020-04-07 18:38:56 +00:00
|
|
|
std::shared_ptr<path::Path> p;
|
|
|
|
std::set<ConvoTag> tags;
|
|
|
|
if (GetConvoTagsForService(remote, tags))
|
2018-09-14 13:43:42 +00:00
|
|
|
{
|
2019-06-28 14:12:20 +00:00
|
|
|
// the remote endpoint's intro
|
2019-06-14 12:49:45 +00:00
|
|
|
Introduction remoteIntro;
|
2019-06-28 14:12:20 +00:00
|
|
|
Introduction replyPath;
|
2019-06-14 12:49:45 +00:00
|
|
|
SharedSecret K;
|
|
|
|
// pick tag
|
2020-04-07 18:38:56 +00:00
|
|
|
for (const auto& tag : tags)
|
2018-09-14 13:43:42 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (tag.IsZero())
|
2019-06-14 12:49:45 +00:00
|
|
|
continue;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (!GetCachedSessionKeyFor(tag, K))
|
2019-06-14 12:49:45 +00:00
|
|
|
continue;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (!GetReplyIntroFor(tag, replyPath))
|
2019-06-28 14:12:20 +00:00
|
|
|
continue;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (!GetIntroFor(tag, remoteIntro))
|
2019-06-28 14:12:20 +00:00
|
|
|
continue;
|
|
|
|
// get path for intro
|
|
|
|
ForEachPath([&](path::Path_ptr path) {
|
2020-04-07 18:38:56 +00:00
|
|
|
if (path->intro == replyPath)
|
2019-07-01 21:35:49 +00:00
|
|
|
{
|
|
|
|
p = path;
|
2019-07-01 20:45:00 +00:00
|
|
|
return;
|
2019-07-01 21:35:49 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
if (p && p->ExpiresSoon(now) && path->IsReady()
|
|
|
|
&& path->intro.router == replyPath.router)
|
2019-07-01 21:35:49 +00:00
|
|
|
{
|
2019-06-28 14:12:20 +00:00
|
|
|
p = path;
|
2019-07-01 21:35:49 +00:00
|
|
|
}
|
2019-06-28 14:12:20 +00:00
|
|
|
});
|
2020-04-07 18:38:56 +00:00
|
|
|
if (p)
|
2018-09-14 13:43:42 +00:00
|
|
|
{
|
2019-06-28 14:12:20 +00:00
|
|
|
f.T = tag;
|
2018-09-14 13:43:42 +00:00
|
|
|
}
|
2019-06-14 12:49:45 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
if (p)
|
2019-06-14 12:49:45 +00:00
|
|
|
{
|
|
|
|
// TODO: check expiration of our end
|
2020-04-07 18:38:56 +00:00
|
|
|
auto m = std::make_shared<ProtocolMessage>(f.T);
|
2019-09-19 14:41:31 +00:00
|
|
|
m->PutBuffer(data);
|
2019-06-14 12:49:45 +00:00
|
|
|
f.N.Randomize();
|
|
|
|
f.C.Zero();
|
|
|
|
transfer->Y.Randomize();
|
2020-04-07 18:38:56 +00:00
|
|
|
m->proto = t;
|
2019-09-19 14:41:31 +00:00
|
|
|
m->introReply = p->intro;
|
|
|
|
PutReplyIntroFor(f.T, m->introReply);
|
2020-04-07 18:38:56 +00:00
|
|
|
m->sender = m_Identity.pub;
|
|
|
|
m->seqno = GetSeqNoForConvo(f.T);
|
|
|
|
f.S = 1;
|
|
|
|
f.F = m->introReply.pathID;
|
2019-06-14 12:49:45 +00:00
|
|
|
transfer->P = remoteIntro.pathID;
|
2020-04-07 18:38:56 +00:00
|
|
|
auto self = this;
|
2019-09-19 14:41:31 +00:00
|
|
|
return CryptoWorker()->addJob([transfer, p, m, K, self]() {
|
2020-04-07 18:38:56 +00:00
|
|
|
if (not transfer->T.EncryptAndSign(*m, K, self->m_Identity))
|
2019-09-19 14:41:31 +00:00
|
|
|
{
|
|
|
|
LogError("failed to encrypt and sign");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-02-21 17:21:11 +00:00
|
|
|
util::Lock lock(self->m_state->m_SendQueueMutex);
|
2019-09-19 14:41:31 +00:00
|
|
|
self->m_state->m_SendQueue.emplace_back(transfer, p);
|
|
|
|
});
|
2018-09-14 13:43:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-02-18 16:00:45 +00:00
|
|
|
else
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
2020-02-14 18:12:45 +00:00
|
|
|
auto& sessions = m_state->m_RemoteSessions;
|
2020-04-07 18:38:56 +00:00
|
|
|
auto range = sessions.equal_range(remote);
|
|
|
|
auto itr = range.first;
|
|
|
|
while (itr != range.second)
|
2018-09-28 12:22:50 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (itr->second->ReadyToSend())
|
2018-09-28 12:22:50 +00:00
|
|
|
{
|
|
|
|
itr->second->AsyncEncryptAndSendTo(data, t);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
++itr;
|
|
|
|
}
|
2020-02-18 16:00:45 +00:00
|
|
|
// if we want to make an outbound session
|
2020-04-07 18:38:56 +00:00
|
|
|
if (WantsOutboundSession(remote))
|
2020-02-14 18:12:45 +00:00
|
|
|
{
|
|
|
|
// add pending traffic
|
|
|
|
auto& traffic = m_state->m_PendingTraffic;
|
|
|
|
traffic[remote].emplace_back(data, t);
|
|
|
|
return EnsurePathToService(
|
|
|
|
remote,
|
|
|
|
[self = this](Address addr, OutboundContext* ctx) {
|
2020-04-07 18:38:56 +00:00
|
|
|
if (ctx)
|
2020-02-14 18:12:45 +00:00
|
|
|
{
|
2020-02-14 20:14:43 +00:00
|
|
|
ctx->UpdateIntroSet();
|
2020-04-07 18:38:56 +00:00
|
|
|
for (auto& pending : self->m_state->m_PendingTraffic[addr])
|
2020-02-14 18:12:45 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
ctx->AsyncEncryptAndSendTo(pending.Buffer(), pending.protocol);
|
2020-02-14 18:12:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
self->m_state->m_PendingTraffic.erase(addr);
|
|
|
|
},
|
2020-02-24 19:40:45 +00:00
|
|
|
1500ms);
|
2020-02-14 18:12:45 +00:00
|
|
|
}
|
2018-08-22 15:52:10 +00:00
|
|
|
}
|
2020-02-14 18:12:45 +00:00
|
|
|
return false;
|
2018-09-24 19:50:52 +00:00
|
|
|
}
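On a miss, SendToServiceOrQueue queues the payload in m_PendingTraffic and starts EnsurePathToService with a 1500 ms timeout, flushing the queued traffic once the outbound context is ready, so a single call suffices even before any session exists. A caller sketch (names hypothetical; `eProtocolTraffic` is an assumption standing in for whichever ProtocolType constant the build defines):

```C++
// Sketch: send one datagram to a remote hidden service, letting the
// endpoint queue it and build an outbound session on demand.
bool
SendToHiddenService(llarp::service::Endpoint& ep, const llarp::service::Address& remote,
                    uint8_t* data, size_t sz)
{
  const llarp_buffer_t buf(data, sz);
  // true if sent over an existing convo/session or queued pending a new
  // session; false for empty buffers or when no session can be arranged
  return ep.SendToServiceOrQueue(remote, buf, llarp::service::eProtocolTraffic);  // assumed constant
}
```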
|
2018-08-22 15:52:10 +00:00
|
|
|
|
2019-03-08 17:00:13 +00:00
|
|
|
bool
|
|
|
|
Endpoint::HasConvoTag(const ConvoTag& t) const
|
|
|
|
{
|
2019-07-15 09:15:51 +00:00
|
|
|
return Sessions().find(t) != Sessions().end();
|
2019-03-08 17:00:13 +00:00
|
|
|
}
|
|
|
|
|
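// Returns the next (monotonically increasing) sequence number for an
// established convo, or 0 if the tag is unknown.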
2018-08-09 19:02:17 +00:00
|
|
|
uint64_t
|
|
|
|
Endpoint::GetSeqNoForConvo(const ConvoTag& tag)
|
|
|
|
{
|
2019-07-15 09:15:51 +00:00
|
|
|
auto itr = Sessions().find(tag);
|
2020-04-07 18:38:56 +00:00
|
|
|
if (itr == Sessions().end())
|
2018-08-09 19:02:17 +00:00
|
|
|
return 0;
|
|
|
|
return ++(itr->second.seqno);
|
|
|
|
}
|
|
|
|
|
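// Build another path only when not in build cooldown and nothing is
// currently building; even then, only if the spread interval since the
// last build has elapsed or established intro paths are below the minimum.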
2019-03-08 14:36:24 +00:00
|
|
|
bool
|
|
|
|
Endpoint::ShouldBuildMore(llarp_time_t now) const
|
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (path::Builder::BuildCooldownHit(now))
|
2019-11-05 16:58:53 +00:00
|
|
|
return false;
|
2020-02-24 18:15:12 +00:00
|
|
|
|
|
|
|
size_t numBuilding = NumInStatus(path::ePathBuilding);
|
2020-04-07 18:38:56 +00:00
|
|
|
if (numBuilding > 0)
|
2020-02-24 18:15:12 +00:00
|
|
|
return false;
|
|
|
|
|
2020-03-01 16:33:45 +00:00
|
|
|
return ((now - lastBuild) > path::intro_path_spread)
|
|
|
|
|| NumInStatus(path::ePathEstablished) < path::min_intro_paths;
|
2018-09-24 19:50:52 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
std::shared_ptr<Logic>
|
2018-08-09 19:02:17 +00:00
|
|
|
Endpoint::RouterLogic()
|
2018-07-19 04:58:39 +00:00
|
|
|
{
|
2019-07-15 09:15:51 +00:00
|
|
|
return Router()->logic();
|
2018-07-19 04:58:39 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
std::shared_ptr<Logic>
|
2018-08-09 19:02:17 +00:00
|
|
|
Endpoint::EndpointLogic()
|
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
return m_state->m_IsolatedLogic ? m_state->m_IsolatedLogic : Router()->logic();
|
2018-08-09 19:02:17 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
std::shared_ptr<llarp::thread::ThreadPool>
|
2019-04-23 14:28:59 +00:00
|
|
|
Endpoint::CryptoWorker()
|
2018-07-19 04:58:39 +00:00
|
|
|
{
|
2019-07-15 09:15:51 +00:00
|
|
|
return Router()->threadpool();
|
2018-07-19 04:58:39 +00:00
|
|
|
}
|
|
|
|
|
2019-07-15 09:15:51 +00:00
|
|
|
AbstractRouter*
|
|
|
|
Endpoint::Router()
|
|
|
|
{
|
|
|
|
return m_state->m_Router;
|
|
|
|
}
|
|
|
|
|
2020-05-21 14:18:23 +00:00
|
|
|
void
|
|
|
|
Endpoint::BlacklistSNode(const RouterID snode)
|
|
|
|
{
|
|
|
|
m_state->m_SnodeBlacklist.insert(snode);
|
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
const std::set<RouterID>&
|
2019-07-15 09:15:51 +00:00
|
|
|
Endpoint::SnodeBlacklist() const
|
|
|
|
{
|
|
|
|
return m_state->m_SnodeBlacklist;
|
|
|
|
}
|
|
|
|
|
|
|
|
const IntroSet&
|
|
|
|
Endpoint::introSet() const
|
|
|
|
{
|
|
|
|
return m_state->m_IntroSet;
|
|
|
|
}
|
|
|
|
|
|
|
|
IntroSet&
|
|
|
|
Endpoint::introSet()
|
|
|
|
{
|
|
|
|
return m_state->m_IntroSet;
|
|
|
|
}
|
|
|
|
|
|
|
|
const ConvoMap&
|
|
|
|
Endpoint::Sessions() const
|
|
|
|
{
|
|
|
|
return m_state->m_Sessions;
|
|
|
|
}
|
|
|
|
|
|
|
|
ConvoMap&
|
|
|
|
Endpoint::Sessions()
|
|
|
|
{
|
|
|
|
return m_state->m_Sessions;
|
|
|
|
}
|
2018-07-12 18:21:44 +00:00
|
|
|
} // namespace service
|
2018-07-16 03:32:13 +00:00
|
|
|
} // namespace llarp
|