// lokinet/llarp/handlers/exit.cpp

#include <handlers/exit.hpp>
#include <dns/dns.hpp>
#include <net/net.hpp>
#include <path/path_context.hpp>
#include <router/abstractrouter.hpp>
#include <util/str.hpp>
#include <util/bits.hpp>
#include <cassert>
namespace llarp
{
namespace handlers
{
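// tun device read callback: copy the raw IP packet out of the event loop's
// buffer and dispatch it to the logic thread for handling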
static void
ExitHandlerRecvPkt(llarp_tun_io* tun, const llarp_buffer_t& buf)
{
std::vector<byte_t> pkt;
pkt.resize(buf.sz);
std::copy_n(buf.base, buf.sz, pkt.data());
auto self = static_cast<ExitEndpoint*>(tun->user);
LogicCall(self->GetRouter()->logic(), [self, pktbuf = std::move(pkt)]() {
self->OnInetPacket(std::move(pktbuf));
});
}
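// tun device tick callback: flush queued exit traffic on the logic thread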
static void
ExitHandlerFlush(llarp_tun_io* tun)
{
auto* ep = static_cast<ExitEndpoint*>(tun->user);
LogicCall(ep->GetRouter()->logic(), std::bind(&ExitEndpoint::Flush, ep));
}
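// wire up the dns proxy, the tun device callbacks and the inet-to-network packet queue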
ExitEndpoint::ExitEndpoint(const std::string& name, AbstractRouter* r)
: m_Router(r)
, m_Resolver(std::make_shared<dns::Proxy>(
r->netloop(), r->logic(), r->netloop(), r->logic(), this))
, m_Name(name)
, m_Tun{{0},
0,
0,
{0},
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr}
, m_LocalResolverAddr("127.0.0.1", 53)
, m_InetToNetwork(name + "_exit_rx", r->netloop(), r->netloop())
{
m_Tun.user = this;
m_Tun.recvpkt = &ExitHandlerRecvPkt;
m_Tun.tick = &ExitHandlerFlush;
m_ShouldInitTun = true;
}
ExitEndpoint::~ExitEndpoint() = default;
util::StatusObject
ExitEndpoint::ExtractStatus() const
{
util::StatusObject obj{{"permitExit", m_PermitExit}, {"ip", m_IfAddr.ToString()}};
util::StatusObject exitsObj{};
for (const auto& item : m_ActiveExits)
{
exitsObj[item.first.ToString()] = item.second->ExtractStatus();
}
obj["exits"] = exitsObj;
return obj;
}
bool
ExitEndpoint::SupportsV6() const
{
return m_UseV6;
}
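// hook PTR lookups for addresses inside our exit range, plus A/CNAME/AAAA
// lookups for localhost.loki and .snode names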
bool
ExitEndpoint::ShouldHookDNSMessage(const dns::Message& msg) const
{
if (msg.questions.size() == 0)
return false;
// always hook ptr for ranges we own
if (msg.questions[0].qtype == dns::qTypePTR)
{
huint128_t ip;
if (!dns::DecodePTR(msg.questions[0].qname, ip))
return false;
return m_OurRange.Contains(ip);
}
if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeCNAME
|| msg.questions[0].qtype == dns::qTypeAAAA)
{
if (msg.questions[0].IsName("localhost.loki"))
return true;
if (msg.questions[0].HasTLD(".snode"))
return true;
}
return false;
}
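// answer hooked queries: reverse lookups map back to router ids, .snode names
// resolve to locally mapped addresses (creating snode sessions on demand)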
bool
ExitEndpoint::HandleHookedDNSMessage(dns::Message msg, std::function<void(dns::Message)> reply)
{
if (msg.questions[0].qtype == dns::qTypePTR)
{
huint128_t ip;
if (!dns::DecodePTR(msg.questions[0].qname, ip))
return false;
if (ip == m_IfAddr)
{
RouterID us = GetRouter()->pubkey();
msg.AddAReply(us.ToString(), 300);
}
else
{
auto itr = m_IPToKey.find(ip);
if (itr != m_IPToKey.end() && m_SNodeKeys.find(itr->second) != m_SNodeKeys.end())
{
RouterID them = itr->second;
msg.AddAReply(them.ToString());
}
else
msg.AddNXReply();
}
}
else if (msg.questions[0].qtype == dns::qTypeCNAME)
{
if (msg.questions[0].IsName("random.snode"))
{
RouterID random;
if (GetRouter()->GetRandomGoodRouter(random))
msg.AddCNAMEReply(random.ToString(), 1);
else
msg.AddNXReply();
}
else if (msg.questions[0].IsName("localhost.loki"))
{
RouterID us = m_Router->pubkey();
msg.AddAReply(us.ToString(), 1);
}
else
msg.AddNXReply();
}
else if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeAAAA)
{
const bool isV6 = msg.questions[0].qtype == dns::qTypeAAAA;
const bool isV4 = msg.questions[0].qtype == dns::qTypeA;
if (msg.questions[0].IsName("random.snode"))
{
RouterID random;
if (GetRouter()->GetRandomGoodRouter(random))
{
msg.AddCNAMEReply(random.ToString(), 1);
auto ip = ObtainServiceNodeIP(random);
msg.AddINReply(ip, false);
}
else
msg.AddNXReply();
reply(msg);
return true;
}
if (msg.questions[0].IsName("localhost.loki"))
{
msg.AddINReply(GetIfAddr(), isV6);
reply(msg);
return true;
}
// forward dns for snode
RouterID r;
if (r.FromString(msg.questions[0].Name()))
{
huint128_t ip;
PubKey pubKey(r);
if (isV4 && SupportsV6())
{
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
}
else if (m_SNodeKeys.find(pubKey) == m_SNodeKeys.end())
{
// we do not have it mapped, async obtain it; capture by value because the
// callback fires after this handler has already returned
ObtainSNodeSession(r, [this, msg, pubKey, isV6, reply](std::shared_ptr<exit::BaseSession> session) mutable {
if (session && session->IsReady())
{
msg.AddINReply(m_KeyToIP[pubKey], isV6);
}
else
{
msg.AddNXReply();
}
reply(msg);
});
return true;
}
else
{
// we have it mapped already as a service node
auto itr = m_KeyToIP.find(pubKey);
if (itr != m_KeyToIP.end())
{
ip = itr->second;
msg.AddINReply(ip, isV6);
}
else // fallback case that should never happen (probably)
msg.AddNXReply();
}
}
else
msg.AddNXReply();
}
reply(msg);
return true;
}
void
ExitEndpoint::ObtainSNodeSession(const RouterID& router, exit::SessionReadyFunc obtainCb)
{
ObtainServiceNodeIP(router);
m_SNodeSessions[router]->AddReadyHook(obtainCb);
}
llarp_time_t
ExitEndpoint::Now() const
{
return m_Router->Now();
}
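// call visit on every active exit endpoint for pk; visit returns true to keep
// iterating, false to stop early (in which case this function returns true)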
bool
ExitEndpoint::VisitEndpointsFor(
const PubKey& pk, std::function<bool(exit::Endpoint* const)> visit)
{
auto range = m_ActiveExits.equal_range(pk);
auto itr = range.first;
while (itr != range.second)
{
if (visit(itr->second.get()))
++itr;
else
return true;
}
return false;
}
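// drain packets read from the tun device into the matching exit or snode
// session, then flush all active sessions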
void
ExitEndpoint::Flush()
{
m_InetToNetwork.Process([&](Pkt_t& pkt) {
PubKey pk;
{
auto itr = m_IPToKey.find(pkt.dstv6());
if (itr == m_IPToKey.end())
{
// drop
LogWarn(Name(), " dropping packet, has no session at ", pkt.dstv6());
return;
}
pk = itr->second;
}
// check if this key is a service node
if (m_SNodeKeys.find(pk) != m_SNodeKeys.end())
{
// check if it's a service node session we made and queue it via our
// snode session that we made otherwise use an inbound session that
// was made by the other service node
auto itr = m_SNodeSessions.find(pk);
if (itr != m_SNodeSessions.end())
{
if (itr->second->QueueUpstreamTraffic(pkt, routing::ExitPadSize))
return;
}
}
auto tryFlushingTraffic = [&](exit::Endpoint* const ep) -> bool {
if (!ep->QueueInboundTraffic(ManagedBuffer{pkt.Buffer()}))
{
LogWarn(
Name(),
" dropped inbound traffic for session ",
pk,
" as we are overloaded (probably)");
// continue iteration
return true;
}
// break iteration
return false;
};
if (!VisitEndpointsFor(pk, tryFlushingTraffic))
{
// we may have all dead sessions, wtf now?
LogWarn(
Name(),
" dropped inbound traffic for session ",
pk,
" as we have no working endpoints");
}
});
{
auto itr = m_ActiveExits.begin();
while (itr != m_ActiveExits.end())
{
if (!itr->second->Flush())
{
LogWarn("exit session with ", itr->first, " dropped packets");
}
++itr;
}
}
{
auto itr = m_SNodeSessions.begin();
while (itr != m_SNodeSessions.end())
{
// TODO: move flush upstream to router event loop
if (!itr->second->FlushUpstream())
{
LogWarn("failed to flush snode traffic to ", itr->first, " via outbound session");
}
itr->second->FlushDownstream();
++itr;
2018-11-28 16:38:20 +00:00
}
}
m_Router->PumpLL();
}
bool
ExitEndpoint::Start()
{
// map our address
const PubKey us(m_Router->pubkey());
const huint128_t ip = GetIfAddr();
m_KeyToIP[us] = ip;
m_IPToKey[ip] = us;
m_IPActivity[ip] = std::numeric_limits<llarp_time_t>::max();
m_SNodeKeys.insert(us);
if (m_ShouldInitTun)
{
auto loop = GetRouter()->netloop();
if (!llarp_ev_add_tun(loop.get(), &m_Tun))
{
llarp::LogWarn("Could not create tunnel for exit endpoint");
return false;
}
llarp::LogInfo("Trying to start resolver ", m_LocalResolverAddr.toString());
return m_Resolver->Start(m_LocalResolverAddr, m_UpstreamResolvers);
}
return true;
}
AbstractRouter*
ExitEndpoint::GetRouter()
{
return m_Router;
}
huint128_t
ExitEndpoint::GetIfAddr() const
{
return m_IfAddr;
}
bool
ExitEndpoint::Stop()
{
for (auto& item : m_SNodeSessions)
item.second->Stop();
return true;
}
bool
ExitEndpoint::ShouldRemove() const
{
for (auto& item : m_SNodeSessions)
if (!item.second->ShouldRemove())
return false;
return true;
}
bool
ExitEndpoint::HasLocalMappedAddrFor(const PubKey& pk) const
{
return m_KeyToIP.find(pk) != m_KeyToIP.end();
}
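// get (allocating if needed) the address mapped to this identity and mark it
// as recently active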
huint128_t
ExitEndpoint::GetIPForIdent(const PubKey pk)
{
huint128_t found = {0};
if (!HasLocalMappedAddrFor(pk))
{
// allocate and map
found.h = AllocateNewAddress().h;
if (!m_KeyToIP.emplace(pk, found).second)
{
LogError(Name(), "failed to map ", pk, " to ", found);
return found;
}
if (!m_IPToKey.emplace(found, pk).second)
{
LogError(Name(), "failed to map ", found, " to ", pk);
return found;
}
if (HasLocalMappedAddrFor(pk))
LogInfo(Name(), " mapping ", pk, " to ", found);
else
LogError(Name(), "failed to map ", pk, " to ", found);
}
else
found.h = m_KeyToIP[pk].h;
MarkIPActive(found);
m_KeyToIP.rehash(0);
assert(HasLocalMappedAddrFor(pk));
return found;
}
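// hand out the next free address in our range; once exhausted, reclaim the
// least recently active address and kick its previous owner off the exit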
huint128_t
ExitEndpoint::AllocateNewAddress()
{
if (m_NextAddr < m_HigestAddr)
return ++m_NextAddr;
// find oldest activity ip address
huint128_t found = {0};
llarp_time_t min = std::numeric_limits<llarp_time_t>::max();
auto itr = m_IPActivity.begin();
while (itr != m_IPActivity.end())
{
if (itr->second < min)
{
found.h = itr->first.h;
min = itr->second;
}
++itr;
}
// kick old ident off exit
// TODO: DoS
PubKey pk = m_IPToKey[found];
KickIdentOffExit(pk);
return found;
}
bool
ExitEndpoint::QueueOutboundTraffic(const llarp_buffer_t& buf)
{
return llarp_ev_tun_async_write(&m_Tun, buf);
}
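// drop all address mappings and active exit sessions for this identity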
void
ExitEndpoint::KickIdentOffExit(const PubKey& pk)
{
LogInfo(Name(), " kicking ", pk, " off exit");
huint128_t ip = m_KeyToIP[pk];
m_KeyToIP.erase(pk);
m_IPToKey.erase(ip);
auto range = m_ActiveExits.equal_range(pk);
auto exit_itr = range.first;
while (exit_itr != range.second)
exit_itr = m_ActiveExits.erase(exit_itr);
}
void
ExitEndpoint::MarkIPActive(huint128_t ip)
{
m_IPActivity[ip] = GetRouter()->Now();
}
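// called on the logic thread with a packet read from the tun device; parse it
// and queue it for the next Flush()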
void
ExitEndpoint::OnInetPacket(std::vector<byte_t> buf)
{
const llarp_buffer_t buffer(buf);
m_InetToNetwork.EmplaceIf(
[b = ManagedBuffer(buffer)](Pkt_t& pkt) -> bool { return pkt.Load(b); });
}
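// hand traffic received from an snode session to the tun device, rewriting the
// ip addresses so the packet fits our local range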
bool
ExitEndpoint::QueueSNodePacket(const llarp_buffer_t& buf, huint128_t from)
{
net::IPPacket pkt;
if (!pkt.Load(buf))
return false;
// rewrite ip
if (m_UseV6)
pkt.UpdateIPv6Address(from, m_IfAddr);
else
pkt.UpdateIPv4Address(xhtonl(net::TruncateV6(from)), xhtonl(net::TruncateV6(m_IfAddr)));
return llarp_ev_tun_async_write(&m_Tun, pkt.Buffer());
}
exit::Endpoint*
ExitEndpoint::FindEndpointByPath(const PathID_t& path)
{
exit::Endpoint* endpoint = nullptr;
PubKey pk;
{
auto itr = m_Paths.find(path);
if (itr == m_Paths.end())
return nullptr;
pk = itr->second;
}
{
auto itr = m_ActiveExits.find(pk);
if (itr != m_ActiveExits.end())
{
if (itr->second->PubKey() == pk)
endpoint = itr->second.get();
}
}
return endpoint;
}
bool
ExitEndpoint::UpdateEndpointPath(const PubKey& remote, const PathID_t& next)
{
// check if already mapped
auto itr = m_Paths.find(next);
if (itr != m_Paths.end())
return false;
m_Paths.emplace(next, remote);
return true;
}
void
ExitEndpoint::Configure(const NetworkConfig& networkConfig, const DnsConfig& dnsConfig)
{
/*
* TODO: pre-config refactor, this was checking a couple things that were extremely vague
* these could have appeared on either [dns] or [network], but they weren't documented
* anywhere
*
if (k == "type" && v == "null")
{
m_ShouldInitTun = false;
return true;
}
if (k == "exit")
{
m_PermitExit = IsTrueValue(v.c_str());
return true;
}
*/
if (networkConfig.m_endpointType == "null")
{
m_ShouldInitTun = false;
}
m_LocalResolverAddr = dnsConfig.m_bind;
m_UpstreamResolvers = dnsConfig.m_upstreamDNS;
m_OurRange = networkConfig.m_ifaddr;
const auto host_str = m_OurRange.BaseAddressString();
// string, or just a plain char array?
strncpy(m_Tun.ifaddr, host_str.c_str(), sizeof(m_Tun.ifaddr) - 1);
m_Tun.netmask = m_OurRange.HostmaskBits();
m_IfAddr = m_OurRange.addr;
m_NextAddr = m_IfAddr;
m_HigestAddr = m_OurRange.HighestAddr();
LogInfo(
Name(),
" set ifaddr range to ",
m_Tun.ifaddr,
"/",
m_Tun.netmask,
" lo=",
m_IfAddr,
" hi=",
m_HigestAddr);
m_UseV6 = not m_OurRange.IsV4();
if (networkConfig.m_ifname.length() >= sizeof(m_Tun.ifname))
{
throw std::invalid_argument(
stringify(Name() + " ifname '", networkConfig.m_ifname, "' is too long"));
}
strncpy(m_Tun.ifname, networkConfig.m_ifname.c_str(), sizeof(m_Tun.ifname) - 1);
LogInfo(Name(), " set ifname to ", m_Tun.ifname);
// TODO: "exit-whitelist" and "exit-blacklist"
// (which weren't originally implemented)
}
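// map a remote service node to an address in our range, creating an outbound
// snode session to it the first time it is seen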
huint128_t
ExitEndpoint::ObtainServiceNodeIP(const RouterID& other)
{
const PubKey pubKey(other);
const PubKey us(m_Router->pubkey());
// just in case
if (pubKey == us)
return m_IfAddr;
huint128_t ip = GetIPForIdent(pubKey);
if (m_SNodeKeys.emplace(pubKey).second)
{
auto session = std::make_shared<exit::SNodeSession>(
other,
std::bind(&ExitEndpoint::QueueSNodePacket, this, std::placeholders::_1, ip),
GetRouter(),
2,
1,
true,
false);
// this is a new service node make an outbound session to them
m_SNodeSessions.emplace(other, session);
}
return ip;
}
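// allocate an exit for the given identity on the given path; wantInternet
// requires exit permission to be enabled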
bool
ExitEndpoint::AllocateNewExit(const PubKey pk, const PathID_t& path, bool wantInternet)
{
if (wantInternet && !m_PermitExit)
return false;
auto ip = GetIPForIdent(pk);
if (GetRouter()->pathContext().TransitHopPreviousIsRouter(path, pk.as_array()))
{
// we think this path belongs to a service node
// mark it as such so we don't make an outbound session to them
m_SNodeKeys.emplace(pk.as_array());
}
m_ActiveExits.emplace(
pk, std::make_unique<exit::Endpoint>(pk, path, !wantInternet, ip, this));
m_Paths[path] = pk;
return HasLocalMappedAddrFor(pk);
}
std::string
ExitEndpoint::Name() const
{
return m_Name;
}
void
ExitEndpoint::DelEndpointInfo(const PathID_t& path)
{
m_Paths.erase(path);
}
void
ExitEndpoint::RemoveExit(const exit::Endpoint* ep)
{
auto range = m_ActiveExits.equal_range(ep->PubKey());
auto itr = range.first;
while (itr != range.second)
{
if (itr->second->LocalPath() == ep->LocalPath())
{
itr = m_ActiveExits.erase(itr);
// now ep is gone af
return;
}
++itr;
}
}
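// expire dead snode sessions and exits, then pick the newest live endpoint as
// the chosen exit for each identity and tick the rest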
void
ExitEndpoint::Tick(llarp_time_t now)
{
{
auto itr = m_SNodeSessions.begin();
while (itr != m_SNodeSessions.end())
{
if (itr->second->IsExpired(now))
itr = m_SNodeSessions.erase(itr);
else
{
itr->second->Tick(now);
++itr;
}
}
}
{
// expire
auto itr = m_ActiveExits.begin();
while (itr != m_ActiveExits.end())
{
if (itr->second->IsExpired(now))
itr = m_ActiveExits.erase(itr);
else
++itr;
}
// pick chosen exits and tick
m_ChosenExits.clear();
itr = m_ActiveExits.begin();
while (itr != m_ActiveExits.end())
{
// do we have an exit set for this key?
if (m_ChosenExits.find(itr->first) != m_ChosenExits.end())
{
// yes
if (m_ChosenExits[itr->first]->createdAt < itr->second->createdAt)
{
// if the iterator's exit is newer, use it as the chosen exit for this key
if (!itr->second->LooksDead(now))
m_ChosenExits[itr->first] = itr->second.get();
}
}
else if (!itr->second->LooksDead(now)) // set chosen exit if not dead for key that
// doesn't have one yet
m_ChosenExits[itr->first] = itr->second.get();
// tick which clears the tx rx counters
itr->second->Tick(now);
++itr;
}
}
}
} // namespace handlers
} // namespace llarp