mirror of
https://github.com/oxen-io/lokinet.git
synced 2024-11-11 07:10:36 +00:00
49b9ad7197
* partial tun code refactor * take out the trash * move vpn platform code into llarp/vpn/platform.cpp * fix hive build * fix win32 * fix memory leak on win32 * reduce cpu use * make macos compile * win32 patches: * use wepoll for zmq * use all cores on windows iocp read loop * fix zmq patch for windows * clean up cmake for win32 * add uninstall before reinstall option to win32 installer * more ipv6 stuff * make it compile * fix up route poker * remove an unneeded code block in macos wtf * always use call to system * fix route poker behavior on macos * disable ipv6 on windows for now * cpu perf improvement: * coalesce calls to Router::PumpLL to 1 per event loop wakeup * set up THEN add addresses * emulate proactor event loop on win32 * remove excessively verbose error message * fix issue #1499 * exclude uv_poll from win32 so that it can start up * update logtag to include directory * create minidump on windows if there was a crash * make windows happy * use dmp suffix on minidump files * typo fix * address feedback from jason * use PROJECT_SOURCE_DIR instead of CMAKE_SOURCE_DIR * quote $@ in apply-patches in case path has spaces in it * address feedback from tom * remove llarp/ev/pipe * add comments for clarification * make event loop queue size constant named
689 lines
19 KiB
C++
689 lines
19 KiB
C++
#include <handlers/exit.hpp>
|
|
|
|
#include <dns/dns.hpp>
|
|
#include <net/net.hpp>
|
|
#include <path/path_context.hpp>
|
|
#include <router/abstractrouter.hpp>
|
|
#include <util/str.hpp>
|
|
#include <util/bits.hpp>
|
|
|
|
#include <cassert>
|
|
|
|
namespace llarp
|
|
{
|
|
namespace handlers
|
|
{
|
|
// Construct an exit endpoint owned by router `r`.
// The DNS proxy serves and resolves on the same loop/logic pair (server and
// client sides are both bound to the router's netloop/logic here).
// m_InetToNetwork is the queue of packets read from the tun device, drained
// by Flush().
ExitEndpoint::ExitEndpoint(const std::string& name, AbstractRouter* r)
    : m_Router(r)
    , m_Resolver(std::make_shared<dns::Proxy>(
          r->netloop(), r->logic(), r->netloop(), r->logic(), this))
    , m_Name(name)
    , m_LocalResolverAddr("127.0.0.1", 53)  // default; overwritten by Configure()
    , m_InetToNetwork(name + "_exit_rx", r->netloop(), r->netloop())

{
  // default to creating a tun interface; Configure() may disable this when
  // the endpoint type is "null"
  m_ShouldInitTun = true;
}
|
|
|
|
// All members are RAII-managed; nothing to release explicitly.
ExitEndpoint::~ExitEndpoint() = default;
|
|
|
|
// Produce an introspection snapshot: whether exit is permitted, our interface
// address, and the status of every active exit session keyed by identity.
util::StatusObject
ExitEndpoint::ExtractStatus() const
{
  util::StatusObject exitsObj{};
  for (const auto& [identity, endpoint] : m_ActiveExits)
    exitsObj[identity.ToString()] = endpoint->ExtractStatus();

  util::StatusObject obj{{"permitExit", m_PermitExit}, {"ip", m_IfAddr.ToString()}};
  obj["exits"] = exitsObj;
  return obj;
}
|
|
|
|
// Whether our configured range is IPv6 (set in Configure() from the ifaddr
// range; false when the range is a v4-mapped one).
bool
ExitEndpoint::SupportsV6() const
{
  return m_UseV6;
}
|
|
|
|
bool
|
|
ExitEndpoint::ShouldHookDNSMessage(const dns::Message& msg) const
|
|
{
|
|
if (msg.questions.size() == 0)
|
|
return false;
|
|
// always hook ptr for ranges we own
|
|
if (msg.questions[0].qtype == dns::qTypePTR)
|
|
{
|
|
huint128_t ip;
|
|
if (!dns::DecodePTR(msg.questions[0].qname, ip))
|
|
return false;
|
|
return m_OurRange.Contains(ip);
|
|
}
|
|
if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeCNAME
|
|
|| msg.questions[0].qtype == dns::qTypeAAAA)
|
|
{
|
|
if (msg.questions[0].IsName("localhost.loki"))
|
|
return true;
|
|
if (msg.questions[0].HasTLD(".snode"))
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
bool
|
|
ExitEndpoint::HandleHookedDNSMessage(dns::Message msg, std::function<void(dns::Message)> reply)
|
|
{
|
|
if (msg.questions[0].qtype == dns::qTypePTR)
|
|
{
|
|
huint128_t ip;
|
|
if (!dns::DecodePTR(msg.questions[0].qname, ip))
|
|
return false;
|
|
if (ip == m_IfAddr)
|
|
{
|
|
RouterID us = GetRouter()->pubkey();
|
|
msg.AddAReply(us.ToString(), 300);
|
|
}
|
|
else
|
|
{
|
|
auto itr = m_IPToKey.find(ip);
|
|
if (itr != m_IPToKey.end() && m_SNodeKeys.find(itr->second) != m_SNodeKeys.end())
|
|
{
|
|
RouterID them = itr->second;
|
|
msg.AddAReply(them.ToString());
|
|
}
|
|
else
|
|
msg.AddNXReply();
|
|
}
|
|
}
|
|
else if (msg.questions[0].qtype == dns::qTypeCNAME)
|
|
{
|
|
if (msg.questions[0].IsName("random.snode"))
|
|
{
|
|
RouterID random;
|
|
if (GetRouter()->GetRandomGoodRouter(random))
|
|
msg.AddCNAMEReply(random.ToString(), 1);
|
|
else
|
|
msg.AddNXReply();
|
|
}
|
|
else if (msg.questions[0].IsName("localhost.loki"))
|
|
{
|
|
RouterID us = m_Router->pubkey();
|
|
msg.AddAReply(us.ToString(), 1);
|
|
}
|
|
else
|
|
msg.AddNXReply();
|
|
}
|
|
else if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeAAAA)
|
|
{
|
|
const bool isV6 = msg.questions[0].qtype == dns::qTypeAAAA;
|
|
const bool isV4 = msg.questions[0].qtype == dns::qTypeA;
|
|
if (msg.questions[0].IsName("random.snode"))
|
|
{
|
|
RouterID random;
|
|
if (GetRouter()->GetRandomGoodRouter(random))
|
|
{
|
|
msg.AddCNAMEReply(random.ToString(), 1);
|
|
auto ip = ObtainServiceNodeIP(random);
|
|
msg.AddINReply(ip, false);
|
|
}
|
|
else
|
|
msg.AddNXReply();
|
|
reply(msg);
|
|
return true;
|
|
}
|
|
if (msg.questions[0].IsName("localhost.loki"))
|
|
{
|
|
msg.AddINReply(GetIfAddr(), isV6);
|
|
reply(msg);
|
|
return true;
|
|
}
|
|
// forward dns for snode
|
|
RouterID r;
|
|
if (r.FromString(msg.questions[0].Name()))
|
|
{
|
|
huint128_t ip;
|
|
PubKey pubKey(r);
|
|
if (isV4 && SupportsV6())
|
|
{
|
|
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
|
|
}
|
|
else if (m_SNodeKeys.find(pubKey) == m_SNodeKeys.end())
|
|
{
|
|
// we do not have it mapped, async obtain it
|
|
ObtainSNodeSession(r, [&](std::shared_ptr<exit::BaseSession> session) {
|
|
if (session && session->IsReady())
|
|
{
|
|
msg.AddINReply(m_KeyToIP[pubKey], isV6);
|
|
}
|
|
else
|
|
{
|
|
msg.AddNXReply();
|
|
}
|
|
reply(msg);
|
|
});
|
|
return true;
|
|
}
|
|
else
|
|
{
|
|
// we have it mapped already as a service node
|
|
auto itr = m_KeyToIP.find(pubKey);
|
|
if (itr != m_KeyToIP.end())
|
|
{
|
|
ip = itr->second;
|
|
msg.AddINReply(ip, isV6);
|
|
}
|
|
else // fallback case that should never happen (probably)
|
|
msg.AddNXReply();
|
|
}
|
|
}
|
|
else
|
|
msg.AddNXReply();
|
|
}
|
|
reply(msg);
|
|
return true;
|
|
}
|
|
|
|
void
|
|
ExitEndpoint::ObtainSNodeSession(const RouterID& router, exit::SessionReadyFunc obtainCb)
|
|
{
|
|
ObtainServiceNodeIP(router);
|
|
m_SNodeSessions[router]->AddReadyHook(obtainCb);
|
|
}
|
|
|
|
// Current time as seen by the owning router (single time source for the
// whole process).
llarp_time_t
ExitEndpoint::Now() const
{
  return m_Router->Now();
}
|
|
|
|
bool
|
|
ExitEndpoint::VisitEndpointsFor(
|
|
const PubKey& pk, std::function<bool(exit::Endpoint* const)> visit)
|
|
{
|
|
auto range = m_ActiveExits.equal_range(pk);
|
|
auto itr = range.first;
|
|
while (itr != range.second)
|
|
{
|
|
if (visit(itr->second.get()))
|
|
++itr;
|
|
else
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
// Pump loop body (registered as a ticker in Start()): drain packets read
// from the tun interface into the owning sessions, then flush every active
// exit endpoint and snode session, and finally wake the router so queued
// traffic is sent.
void
ExitEndpoint::Flush()
{
  m_InetToNetwork.Process([&](Pkt_t& pkt) {
    PubKey pk;
    {
      // resolve the destination address to the identity that owns it
      auto itr = m_IPToKey.find(pkt.dstv6());
      if (itr == m_IPToKey.end())
      {
        // drop
        LogWarn(Name(), " dropping packet, has no session at ", pkt.dstv6());
        return;
      }
      pk = itr->second;
    }
    // check if this key is a service node
    if (m_SNodeKeys.find(pk) != m_SNodeKeys.end())
    {
      // check if it's a service node session we made and queue it via our
      // snode session that we made otherwise use an inbound session that
      // was made by the other service node
      auto itr = m_SNodeSessions.find(pk);
      if (itr != m_SNodeSessions.end())
      {
        // queued successfully via our outbound session: done with this packet
        if (itr->second->QueueUpstreamTraffic(pkt, routing::ExitPadSize))
          return;
      }
    }
    // otherwise offer the packet to each active exit endpoint for this
    // identity until one accepts it
    auto tryFlushingTraffic = [&](exit::Endpoint* const ep) -> bool {
      if (!ep->QueueInboundTraffic(ManagedBuffer{pkt.Buffer()}))
      {
        LogWarn(
            Name(),
            " dropped inbound traffic for session ",
            pk,
            " as we are overloaded (probably)");
        // continue iteration
        return true;
      }
      // break iteration
      return false;
    };
    // VisitEndpointsFor returns false when no endpoint accepted the packet
    if (!VisitEndpointsFor(pk, tryFlushingTraffic))
    {
      // we may have all dead sessions, wtf now?
      LogWarn(
          Name(),
          " dropped inbound traffic for session ",
          pk,
          " as we have no working endpoints");
    }
  });
  {
    // flush every active exit session toward its client
    auto itr = m_ActiveExits.begin();
    while (itr != m_ActiveExits.end())
    {
      if (!itr->second->Flush())
      {
        LogWarn("exit session with ", itr->first, " dropped packets");
      }
      ++itr;
    }
  }
  {
    // flush outbound snode sessions in both directions
    auto itr = m_SNodeSessions.begin();
    while (itr != m_SNodeSessions.end())
    {
      // TODO: move flush upstream to router event loop
      if (!itr->second->FlushUpstream())
      {
        LogWarn("failed to flush snode traffic to ", itr->first, " via outbound session");
      }
      itr->second->FlushDownstream();
      ++itr;
    }
  }
  // wake the router so it processes what we queued this round
  m_Router->PumpLL();
}
|
|
|
|
bool
|
|
ExitEndpoint::Start()
|
|
{
|
|
// map our address
|
|
const PubKey us(m_Router->pubkey());
|
|
const huint128_t ip = GetIfAddr();
|
|
m_KeyToIP[us] = ip;
|
|
m_IPToKey[ip] = us;
|
|
m_IPActivity[ip] = std::numeric_limits<llarp_time_t>::max();
|
|
m_SNodeKeys.insert(us);
|
|
if (m_ShouldInitTun)
|
|
{
|
|
vpn::InterfaceInfo info;
|
|
info.ifname = m_ifname;
|
|
info.addrs.emplace(m_OurRange);
|
|
|
|
m_NetIf = GetRouter()->GetVPNPlatform()->ObtainInterface(std::move(info));
|
|
if (not m_NetIf)
|
|
{
|
|
llarp::LogError("Could not create interface");
|
|
return false;
|
|
}
|
|
auto loop = GetRouter()->netloop();
|
|
if (not loop->add_network_interface(
|
|
m_NetIf, [&](net::IPPacket pkt) { OnInetPacket(std::move(pkt)); }))
|
|
{
|
|
llarp::LogWarn("Could not create tunnel for exit endpoint");
|
|
return false;
|
|
}
|
|
|
|
loop->add_ticker([&]() { Flush(); });
|
|
|
|
llarp::LogInfo("Trying to start resolver ", m_LocalResolverAddr.toString());
|
|
return m_Resolver->Start(m_LocalResolverAddr, m_UpstreamResolvers);
|
|
}
|
|
return true;
|
|
}
|
|
|
|
// Non-owning accessor for the router that owns this endpoint.
AbstractRouter*
ExitEndpoint::GetRouter()
{
  return m_Router;
}
|
|
|
|
// The address of our tun interface (base address of the configured range).
huint128_t
ExitEndpoint::GetIfAddr() const
{
  return m_IfAddr;
}
|
|
|
|
bool
|
|
ExitEndpoint::Stop()
|
|
{
|
|
for (auto& item : m_SNodeSessions)
|
|
item.second->Stop();
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
ExitEndpoint::ShouldRemove() const
|
|
{
|
|
for (auto& item : m_SNodeSessions)
|
|
if (!item.second->ShouldRemove())
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
ExitEndpoint::HasLocalMappedAddrFor(const PubKey& pk) const
|
|
{
|
|
return m_KeyToIP.find(pk) != m_KeyToIP.end();
|
|
}
|
|
|
|
// Return the interface address mapped to `pk`, allocating a fresh one (and
// recording it in both directions) when none exists. Always refreshes the
// address's activity timestamp so it is not recycled soon.
// NOTE(review): on the (unexpected) emplace failures below the function
// returns a half-mapped address after logging — confirm callers tolerate it.
huint128_t
ExitEndpoint::GetIPForIdent(const PubKey pk)
{
  huint128_t found = {0};
  if (!HasLocalMappedAddrFor(pk))
  {
    // allocate and map
    found.h = AllocateNewAddress().h;
    if (!m_KeyToIP.emplace(pk, found).second)
    {
      LogError(Name(), "failed to map ", pk, " to ", found);
      return found;
    }
    if (!m_IPToKey.emplace(found, pk).second)
    {
      // forward map succeeded but reverse map did not
      LogError(Name(), "failed to map ", found, " to ", pk);
      return found;
    }
    if (HasLocalMappedAddrFor(pk))
      LogInfo(Name(), " mapping ", pk, " to ", found);
    else
      LogError(Name(), "failed to map ", pk, " to ", found);
  }
  else
    found.h = m_KeyToIP[pk].h;

  // touch the address so AllocateNewAddress() won't pick it as oldest
  MarkIPActive(found);
  // rehash(0) requests the minimum bucket count for the current size
  m_KeyToIP.rehash(0);
  assert(HasLocalMappedAddrFor(pk));
  return found;
}
|
|
|
|
// Hand out the next free address in our range; when the range is exhausted,
// recycle the least-recently-active address by kicking its current owner off
// the exit.
huint128_t
ExitEndpoint::AllocateNewAddress()
{
  if (m_NextAddr < m_HigestAddr)
    return ++m_NextAddr;

  // range exhausted: locate the address with the oldest activity timestamp
  huint128_t oldest = {0};
  llarp_time_t leastActive = std::numeric_limits<llarp_time_t>::max();
  for (const auto& [addr, lastActive] : m_IPActivity)
  {
    if (lastActive < leastActive)
    {
      oldest.h = addr.h;
      leastActive = lastActive;
    }
  }
  // kick old ident off exit
  // TODO: DoS
  PubKey pk = m_IPToKey[oldest];
  KickIdentOffExit(pk);

  return oldest;
}
|
|
|
|
bool
|
|
ExitEndpoint::QueueOutboundTraffic(net::IPPacket pkt)
|
|
{
|
|
return m_NetIf && m_NetIf->WritePacket(std::move(pkt));
|
|
}
|
|
|
|
void
|
|
ExitEndpoint::KickIdentOffExit(const PubKey& pk)
|
|
{
|
|
LogInfo(Name(), " kicking ", pk, " off exit");
|
|
huint128_t ip = m_KeyToIP[pk];
|
|
m_KeyToIP.erase(pk);
|
|
m_IPToKey.erase(ip);
|
|
auto range = m_ActiveExits.equal_range(pk);
|
|
auto exit_itr = range.first;
|
|
while (exit_itr != range.second)
|
|
exit_itr = m_ActiveExits.erase(exit_itr);
|
|
}
|
|
|
|
// Record "now" as the last activity time for this address, protecting it
// from recycling in AllocateNewAddress().
void
ExitEndpoint::MarkIPActive(huint128_t ip)
{
  m_IPActivity[ip] = GetRouter()->Now();
}
|
|
|
|
// Callback for packets read from the tun interface: queue them for Flush()
// to route into the appropriate session.
void
ExitEndpoint::OnInetPacket(net::IPPacket pkt)
{
  m_InetToNetwork.Emplace(std::move(pkt));
}
|
|
|
|
bool
|
|
ExitEndpoint::QueueSNodePacket(const llarp_buffer_t& buf, huint128_t from)
|
|
{
|
|
net::IPPacket pkt;
|
|
if (!pkt.Load(buf))
|
|
return false;
|
|
// rewrite ip
|
|
if (m_UseV6)
|
|
pkt.UpdateIPv6Address(from, m_IfAddr);
|
|
else
|
|
pkt.UpdateIPv4Address(xhtonl(net::TruncateV6(from)), xhtonl(net::TruncateV6(m_IfAddr)));
|
|
return m_NetIf and m_NetIf->WritePacket(std::move(pkt));
|
|
}
|
|
|
|
// Resolve a path id to the active exit endpoint that owns it, or nullptr
// when the path is unknown or no matching exit exists.
exit::Endpoint*
ExitEndpoint::FindEndpointByPath(const PathID_t& path)
{
  exit::Endpoint* endpoint = nullptr;
  PubKey pk;
  {
    // path id -> identity
    auto itr = m_Paths.find(path);
    if (itr == m_Paths.end())
      return nullptr;
    pk = itr->second;
  }
  {
    // NOTE(review): m_ActiveExits holds multiple entries per key elsewhere
    // (equal_range is used in RemoveExit); find() here inspects only one of
    // them, and the PubKey() == pk check is then tautological — confirm
    // whether matching on LocalPath() was intended instead.
    auto itr = m_ActiveExits.find(pk);
    if (itr != m_ActiveExits.end())
    {
      if (itr->second->PubKey() == pk)
        endpoint = itr->second.get();
    }
  }
  return endpoint;
}
|
|
|
|
bool
|
|
ExitEndpoint::UpdateEndpointPath(const PubKey& remote, const PathID_t& next)
|
|
{
|
|
// check if already mapped
|
|
auto itr = m_Paths.find(next);
|
|
if (itr != m_Paths.end())
|
|
return false;
|
|
m_Paths.emplace(next, remote);
|
|
return true;
|
|
}
|
|
|
|
void
|
|
ExitEndpoint::Configure(const NetworkConfig& networkConfig, const DnsConfig& dnsConfig)
|
|
{
|
|
/*
|
|
* TODO: pre-config refactor, this was checking a couple things that were extremely vague
|
|
* these could have appeared on either [dns] or [network], but they weren't documented
|
|
* anywhere
|
|
*
|
|
if (k == "type" && v == "null")
|
|
{
|
|
m_ShouldInitTun = false;
|
|
return true;
|
|
}
|
|
if (k == "exit")
|
|
{
|
|
m_PermitExit = IsTrueValue(v.c_str());
|
|
return true;
|
|
}
|
|
*/
|
|
if (networkConfig.m_endpointType == "null")
|
|
{
|
|
m_ShouldInitTun = false;
|
|
}
|
|
|
|
m_LocalResolverAddr = dnsConfig.m_bind;
|
|
m_UpstreamResolvers = dnsConfig.m_upstreamDNS;
|
|
|
|
m_OurRange = networkConfig.m_ifaddr;
|
|
if (!m_OurRange.addr.h)
|
|
{
|
|
const auto maybe = llarp::FindFreeRange();
|
|
if (not maybe.has_value())
|
|
throw std::runtime_error("cannot find free interface range");
|
|
m_OurRange = *maybe;
|
|
}
|
|
const auto host_str = m_OurRange.BaseAddressString();
|
|
// string, or just a plain char array?
|
|
m_IfAddr = m_OurRange.addr;
|
|
m_NextAddr = m_IfAddr;
|
|
m_HigestAddr = m_OurRange.HighestAddr();
|
|
m_UseV6 = not m_OurRange.IsV4();
|
|
|
|
m_ifname = networkConfig.m_ifname;
|
|
if (m_ifname.empty())
|
|
{
|
|
const auto maybe = llarp::FindFreeTun();
|
|
if (not maybe.has_value())
|
|
throw std::runtime_error("cannot find free interface name");
|
|
m_ifname = *maybe;
|
|
}
|
|
LogInfo(Name(), " set ifname to ", m_ifname);
|
|
|
|
// TODO: "exit-whitelist" and "exit-blacklist"
|
|
// (which weren't originally implemented)
|
|
}
|
|
|
|
// Return the interface address mapped to service node `other`, creating the
// mapping and an outbound snode session the first time we see this key.
// Asking about ourselves returns our own interface address without creating
// a session.
huint128_t
ExitEndpoint::ObtainServiceNodeIP(const RouterID& other)
{
  const PubKey pubKey(other);
  const PubKey us(m_Router->pubkey());
  // just in case
  if (pubKey == us)
    return m_IfAddr;

  huint128_t ip = GetIPForIdent(pubKey);
  if (m_SNodeKeys.emplace(pubKey).second)
  {
    // idiom: lambda instead of std::bind — clearer capture of `ip` and no
    // placeholder machinery
    auto session = std::make_shared<exit::SNodeSession>(
        other,
        [this, ip](const llarp_buffer_t& buf) { return QueueSNodePacket(buf, ip); },
        GetRouter(),
        2,
        1,
        true,
        false);
    // this is a new service node make an outbound session to them
    m_SNodeSessions.emplace(other, session);
  }
  return ip;
}
|
|
|
|
// Register a new exit session for identity `pk` over transit path `path`.
// Refuses when internet exit is requested but not permitted by config.
// Returns whether the identity ended up with a mapped address.
bool
ExitEndpoint::AllocateNewExit(const PubKey pk, const PathID_t& path, bool wantInternet)
{
  if (wantInternet && !m_PermitExit)
    return false;
  auto ip = GetIPForIdent(pk);
  if (GetRouter()->pathContext().TransitHopPreviousIsRouter(path, pk.as_array()))
  {
    // we think this path belongs to a service node
    // mark it as such so we don't make an outbound session to them
    m_SNodeKeys.emplace(pk.as_array());
  }
  m_ActiveExits.emplace(
      pk, std::make_unique<exit::Endpoint>(pk, path, !wantInternet, ip, this));

  // remember which identity owns this path id
  m_Paths[path] = pk;

  return HasLocalMappedAddrFor(pk);
}
|
|
|
|
// Endpoint name as given at construction (used as a log prefix).
std::string
ExitEndpoint::Name() const
{
  return m_Name;
}
|
|
|
|
// Forget the path-id -> identity mapping for a torn-down path.
void
ExitEndpoint::DelEndpointInfo(const PathID_t& path)
{
  m_Paths.erase(path);
}
|
|
|
|
void
|
|
ExitEndpoint::RemoveExit(const exit::Endpoint* ep)
|
|
{
|
|
auto range = m_ActiveExits.equal_range(ep->PubKey());
|
|
auto itr = range.first;
|
|
while (itr != range.second)
|
|
{
|
|
if (itr->second->LocalPath() == ep->LocalPath())
|
|
{
|
|
itr = m_ActiveExits.erase(itr);
|
|
// now ep is gone af
|
|
return;
|
|
}
|
|
++itr;
|
|
}
|
|
}
|
|
|
|
// Periodic upkeep: expire dead snode sessions and exit endpoints, then
// re-elect the preferred ("chosen") exit per identity and tick everything
// that remains.
void
ExitEndpoint::Tick(llarp_time_t now)
{
  {
    // drop expired snode sessions, tick the live ones
    auto itr = m_SNodeSessions.begin();
    while (itr != m_SNodeSessions.end())
    {
      if (itr->second->IsExpired(now))
        itr = m_SNodeSessions.erase(itr);
      else
      {
        itr->second->Tick(now);
        ++itr;
      }
    }
  }
  {
    // expire
    auto itr = m_ActiveExits.begin();
    while (itr != m_ActiveExits.end())
    {
      if (itr->second->IsExpired(now))
        itr = m_ActiveExits.erase(itr);
      else
        ++itr;
    }
    // pick chosen exits and tick
    m_ChosenExits.clear();
    itr = m_ActiveExits.begin();
    while (itr != m_ActiveExits.end())
    {
      // do we have an exit set for this key?
      if (m_ChosenExits.find(itr->first) != m_ChosenExits.end())
      {
        // yes
        if (m_ChosenExits[itr->first]->createdAt < itr->second->createdAt)
        {
          // if the iterators's exit is newer use it for the chosen exit for
          // key
          if (!itr->second->LooksDead(now))
            m_ChosenExits[itr->first] = itr->second.get();
        }
      }
      else if (!itr->second->LooksDead(now))  // set chosen exit if not dead for key that
                                              // doesn't have one yet
        m_ChosenExits[itr->first] = itr->second.get();
      // tick which clears the tx rx counters
      itr->second->Tick(now);
      ++itr;
    }
  }
}
|
|
} // namespace handlers
|
|
} // namespace llarp
|