// lokinet: llarp/handlers/exit.cpp
#include <handlers/exit.hpp>
#include <dns/dns.hpp>
#include <net/net.hpp>
#include <router/abstractrouter.hpp>
#include <util/str.hpp>

#include <cassert>
namespace llarp
{
namespace handlers
{
// Trampoline from the C tun-device callback into the owning endpoint:
// forwards one inbound IP packet to ExitEndpoint::OnInetPacket.
static void
ExitHandlerRecvPkt(llarp_tun_io *tun, const llarp_buffer_t &buf)
{
  auto *self = static_cast< ExitEndpoint * >(tun->user);
  self->OnInetPacket(buf);
}
static void
2018-11-28 16:38:20 +00:00
ExitHandlerFlush(llarp_tun_io *tun)
{
2018-11-28 16:38:20 +00:00
static_cast< ExitEndpoint * >(tun->user)->Flush();
}
// Construct an exit endpoint called `name`, owned by router `r`.
// Wires the tun callbacks to this instance and defaults to bringing up a
// tun interface unless later configured with type=null (see SetOption).
ExitEndpoint::ExitEndpoint(const std::string &name, AbstractRouter *r)
    : m_Router(r)
    , m_Resolver(r->netloop(), this)
    , m_Name(name)
    , m_Tun{{0}, 0, {0}, 0, 0, 0, 0, 0, 0, 0}
    , m_LocalResolverAddr("127.0.0.1", 53)
    , m_InetToNetwork(name + "_exit_rx", r->netloop(), r->netloop())
{
  m_Tun.user      = this;
  m_Tun.recvpkt   = &ExitHandlerRecvPkt;
  m_Tun.tick      = &ExitHandlerFlush;
  m_ShouldInitTun = true;
}
ExitEndpoint::~ExitEndpoint() = default;
2019-02-11 17:14:43 +00:00
util::StatusObject
ExitEndpoint::ExtractStatus() const
2019-02-08 19:43:25 +00:00
{
2019-02-11 17:14:43 +00:00
util::StatusObject obj{{"permitExit", m_PermitExit},
{"ip", m_IfAddr.ToString()}};
util::StatusObject exitsObj{};
for(const auto &item : m_ActiveExits)
2019-02-08 19:43:25 +00:00
{
2019-02-11 17:14:43 +00:00
exitsObj.Put(item.first.ToHex(), item.second->ExtractStatus());
2019-02-08 19:43:25 +00:00
}
2019-02-11 17:14:43 +00:00
obj.Put("exits", exitsObj);
return obj;
2019-02-08 19:43:25 +00:00
}
// Decide whether this endpoint should answer `msg` itself rather than let
// it pass through to the regular resolver.  We hook:
//  * PTR queries for addresses inside the range we own
//  * A/CNAME/AAAA queries for localhost.loki and names under .snode
bool
ExitEndpoint::ShouldHookDNSMessage(const dns::Message &msg) const
{
  // use empty() instead of size() == 0; it states intent directly
  if(msg.questions.empty())
    return false;
  const auto &q = msg.questions[0];
  // always hook ptr for ranges we own
  if(q.qtype == dns::qTypePTR)
  {
    huint32_t ip;
    if(!dns::DecodePTR(q.qname, ip))
      return false;
    return m_OurRange.Contains(ip);
  }
  if(q.qtype == dns::qTypeA || q.qtype == dns::qTypeCNAME
     || q.qtype == dns::qTypeAAAA)
  {
    if(q.IsName("localhost.loki"))
      return true;
    if(q.HasTLD(".snode"))
      return true;
  }
  return false;
}
// Answer a DNS message previously claimed by ShouldHookDNSMessage.
// Calls `reply` exactly once and returns true on every handled path;
// returns false (without replying) only when a PTR name fails to decode.
bool
ExitEndpoint::HandleHookedDNSMessage(
    dns::Message &&msg, std::function< void(dns::Message) > reply)
{
  const auto qtype = msg.questions[0].qtype;
  if(qtype == dns::qTypePTR)
  {
    huint32_t ip;
    if(!dns::DecodePTR(msg.questions[0].qname, ip))
      return false;
    if(ip == m_IfAddr)
    {
      // reverse lookup of our own interface address -> our router id
      RouterID us = GetRouter()->pubkey();
      msg.AddAReply(us.ToString(), 300);
    }
    else
    {
      auto itr = m_IPToKey.find(ip);
      if(itr != m_IPToKey.end()
         && m_SNodeKeys.find(itr->second) != m_SNodeKeys.end())
      {
        // address is mapped to a known service node
        RouterID them = itr->second;
        msg.AddAReply(them.ToString());
      }
      else
        msg.AddNXReply();
    }
  }
  else if(qtype == dns::qTypeCNAME)
  {
    if(msg.questions[0].IsName("random.snode"))
    {
      // hand back any good router we know of
      RouterID random;
      if(GetRouter()->GetRandomGoodRouter(random))
        msg.AddCNAMEReply(random.ToString(), 1);
      else
        msg.AddNXReply();
    }
    else if(msg.questions[0].IsName("localhost.loki"))
    {
      RouterID us = m_Router->pubkey();
      msg.AddAReply(us.ToString(), 1);
    }
    else
      msg.AddNXReply();
  }
  else if(qtype == dns::qTypeA || qtype == dns::qTypeAAAA)
  {
    const bool isV6 = qtype == dns::qTypeAAAA;
    if(msg.questions[0].IsName("random.snode"))
    {
      RouterID random;
      if(GetRouter()->GetRandomGoodRouter(random))
        msg.AddCNAMEReply(random.ToString(), 1);
      else
        msg.AddNXReply();
      reply(msg);
      return true;
    }
    if(msg.questions[0].IsName("localhost.loki"))
    {
      msg.AddINReply(GetIfAddr(), isV6);
      reply(msg);
      return true;
    }
    // otherwise treat the name as a raw router id and resolve it to the
    // local ip we map for that service node
    RouterID r;
    if(r.FromString(msg.questions[0].Name()))
    {
      huint32_t ip;
      const PubKey pubKey(r);
      if(m_SNodeKeys.find(pubKey) == m_SNodeKeys.end())
      {
        // not mapped yet; allocate a mapping now
        ip = ObtainServiceNodeIP(r);
        msg.AddINReply(ip, isV6);
      }
      else
      {
        // already mapped as a service node
        auto itr = m_KeyToIP.find(pubKey);
        if(itr != m_KeyToIP.end())
        {
          ip = itr->second;
          msg.AddINReply(ip, isV6);
        }
        else  // inconsistent mapping state; should not happen
          msg.AddNXReply();
      }
    }
    else
      msg.AddNXReply();
  }
  reply(msg);
  return true;
}
// Current time, sourced from the router so everything shares one clock.
llarp_time_t
ExitEndpoint::Now() const
{
  return m_Router->Now();
}
void
2018-11-28 16:38:20 +00:00
ExitEndpoint::Flush()
{
m_InetToNetwork.Process([&](Pkt_t &pkt) {
PubKey pk;
{
auto itr = m_IPToKey.find(pkt.dst());
if(itr == m_IPToKey.end())
{
// drop
LogWarn(Name(), " dropping packet, has no session at ", pkt.dst());
return;
}
pk = itr->second;
}
// check if this key is a service node
if(m_SNodeKeys.find(pk) != m_SNodeKeys.end())
{
// check if it's a service node session we made and queue it via our
// snode session that we made otherwise use an inbound session that
// was made by the other service node
auto itr = m_SNodeSessions.find(pk);
if(itr != m_SNodeSessions.end())
{
if(itr->second->QueueUpstreamTraffic(pkt, routing::ExitPadSize))
return;
}
}
exit::Endpoint *ep = m_ChosenExits[pk];
2018-11-28 12:32:38 +00:00
if(ep == nullptr)
{
// we may have all dead sessions, wtf now?
LogWarn(Name(), " dropped inbound traffic for session ", pk,
" as we have no working endpoints");
2018-11-28 12:32:38 +00:00
}
else
{
2019-02-03 00:48:10 +00:00
if(!ep->QueueInboundTraffic(ManagedBuffer{pkt.Buffer()}))
2018-11-15 23:07:39 +00:00
{
LogWarn(Name(), " dropped inbound traffic for session ", pk,
" as we are overloaded (probably)");
2018-11-15 23:07:39 +00:00
}
}
});
2018-11-28 16:38:20 +00:00
{
auto itr = m_ActiveExits.begin();
while(itr != m_ActiveExits.end())
2018-11-28 16:38:20 +00:00
{
if(!itr->second->Flush())
{
LogWarn("exit session with ", itr->first, " dropped packets");
}
++itr;
}
}
{
auto itr = m_SNodeSessions.begin();
while(itr != m_SNodeSessions.end())
{
2019-04-30 13:56:39 +00:00
// TODO: move flush upstream to router event loop
if(!itr->second->FlushUpstream())
{
LogWarn("failed to flush snode traffic to ", itr->first,
" via outbound session");
}
2019-04-30 13:56:39 +00:00
itr->second->FlushDownstream();
++itr;
2018-11-28 16:38:20 +00:00
}
}
2019-04-30 16:07:17 +00:00
m_Router->PumpLL();
}
bool
ExitEndpoint::Start()
{
2018-11-15 21:47:05 +00:00
if(m_ShouldInitTun)
{
2019-04-08 12:01:52 +00:00
auto loop = GetRouter()->netloop();
if(!llarp_ev_add_tun(loop.get(), &m_Tun))
{
llarp::LogWarn("Could not create tunnel for exit endpoint");
return false;
}
llarp::LogInfo("Trying to start resolver ",
m_LocalResolverAddr.ToString());
return m_Resolver.Start(m_LocalResolverAddr, m_UpstreamResolvers);
}
2018-11-15 21:47:05 +00:00
return true;
}
// Accessor for the owning router (non-owning pointer).
AbstractRouter *
ExitEndpoint::GetRouter()
{
  return m_Router;
}
// Crypto implementation is borrowed from the owning router.
Crypto *
ExitEndpoint::GetCrypto()
{
  return m_Router->crypto();
}
// The ip assigned to our tun interface (host byte order).
huint32_t
ExitEndpoint::GetIfAddr() const
{
  return m_IfAddr;
}
// Ask every outbound snode session to stop; actual teardown happens once
// ShouldRemove() reports all of them are done.
bool
ExitEndpoint::Stop()
{
  for(auto &item : m_SNodeSessions)
    item.second->Stop();
  return true;
}
// Removable only once every snode session agrees it can go away.
bool
ExitEndpoint::ShouldRemove() const
{
  for(const auto &item : m_SNodeSessions)
  {
    if(!item.second->ShouldRemove())
      return false;
  }
  return true;
}
2018-11-15 16:19:24 +00:00
bool
ExitEndpoint::HasLocalMappedAddrFor(const PubKey &pk) const
2018-11-15 16:19:24 +00:00
{
2018-11-15 21:47:05 +00:00
return m_KeyToIP.find(pk) != m_KeyToIP.end();
2018-11-15 16:19:24 +00:00
}
// Return the interface-local ip mapped to `pk`, allocating a new one on
// first use, and refresh the address's activity timestamp.
huint32_t
ExitEndpoint::GetIPForIdent(const PubKey pk)
{
  huint32_t found = {0};
  if(!HasLocalMappedAddrFor(pk))
  {
    // no mapping yet: allocate a fresh address and record it both ways
    found = AllocateNewAddress();
    if(!m_KeyToIP.emplace(pk, found).second)
    {
      LogError(Name(), "failed to map ", pk, " to ", found);
      return found;
    }
    if(!m_IPToKey.emplace(found, pk).second)
    {
      LogError(Name(), "failed to map ", found, " to ", pk);
      return found;
    }
    if(HasLocalMappedAddrFor(pk))
      LogInfo(Name(), " mapping ", pk, " to ", found);
    else
      LogError(Name(), "failed to map ", pk, " to ", found);
  }
  else
    found = m_KeyToIP[pk];

  MarkIPActive(found);
  m_KeyToIP.rehash(0);
  assert(HasLocalMappedAddrFor(pk));
  return found;
}
// Hand out the next free address in our range.  When the range is
// exhausted, evict the identity whose address has been idle longest and
// recycle its ip.
huint32_t
ExitEndpoint::AllocateNewAddress()
{
  if(m_NextAddr < m_HigestAddr)
    return ++m_NextAddr;

  // find the least recently active address
  huint32_t oldest        = {0};
  llarp_time_t oldestTime = std::numeric_limits< llarp_time_t >::max();
  for(const auto &item : m_IPActivity)
  {
    if(item.second < oldestTime)
    {
      oldest.h   = item.first.h;
      oldestTime = item.second;
    }
  }
  // kick old ident off exit
  // TODO: DoS
  PubKey pk = m_IPToKey[oldest];
  KickIdentOffExit(pk);
  return oldest;
}
bool
ExitEndpoint::QueueOutboundTraffic(const llarp_buffer_t &buf)
{
2019-02-02 23:12:42 +00:00
return llarp_ev_tun_async_write(&m_Tun, buf);
}
void
ExitEndpoint::KickIdentOffExit(const PubKey &pk)
{
LogInfo(Name(), " kicking ", pk, " off exit");
huint32_t ip = m_KeyToIP[pk];
m_KeyToIP.erase(pk);
m_IPToKey.erase(ip);
auto range = m_ActiveExits.equal_range(pk);
auto exit_itr = range.first;
while(exit_itr != range.second)
exit_itr = m_ActiveExits.erase(exit_itr);
}
// Stamp `ip` with the current time so it is not recycled while in use.
void
ExitEndpoint::MarkIPActive(huint32_t ip)
{
  m_IPActivity[ip] = GetRouter()->Now();
}
// Enqueue an inbound packet from the tun device, keeping it only when it
// parses as a valid packet.
void
ExitEndpoint::OnInetPacket(const llarp_buffer_t &buf)
{
  m_InetToNetwork.EmplaceIf(
      [b = ManagedBuffer(buf)](Pkt_t &pkt) -> bool { return pkt.Load(b); });
}
// Deliver a packet that arrived from a snode session to the local tun
// interface, rewriting its ip addresses first.
bool
ExitEndpoint::QueueSNodePacket(const llarp_buffer_t &buf, huint32_t from)
{
  net::IPv4Packet pkt;
  if(!pkt.Load(buf))
    return false;
  // rewrite ip
  pkt.UpdateIPv4PacketOnDst(from, m_IfAddr);
  return llarp_ev_tun_async_write(&m_Tun, pkt.Buffer());
}
// Look up the exit endpoint bound to transit path `path`.
// Returns nullptr when the path is unknown or no matching exit exists.
exit::Endpoint *
ExitEndpoint::FindEndpointByPath(const PathID_t &path)
{
  PubKey pk;
  {
    auto itr = m_Paths.find(path);
    if(itr == m_Paths.end())
      return nullptr;
    pk = itr->second;
  }
  auto itr = m_ActiveExits.find(pk);
  if(itr != m_ActiveExits.end() && itr->second->PubKey() == pk)
    return itr->second.get();
  return nullptr;
}
bool
ExitEndpoint::UpdateEndpointPath(const PubKey &remote, const PathID_t &next)
2018-11-14 12:23:08 +00:00
{
// check if already mapped
auto itr = m_Paths.find(next);
if(itr != m_Paths.end())
return false;
m_Paths.emplace(next, remote);
2018-11-14 12:23:08 +00:00
return true;
}
bool
ExitEndpoint::SetOption(const std::string &k, const std::string &v)
{
2018-11-15 21:47:05 +00:00
if(k == "type" && v == "null")
{
m_ShouldInitTun = false;
return true;
}
if(k == "exit")
{
2018-11-14 12:23:08 +00:00
m_PermitExit = IsTrueValue(v.c_str());
return true;
}
if(k == "local-dns")
{
std::string resolverAddr = v;
uint16_t dnsport = 53;
auto pos = v.find(":");
if(pos != std::string::npos)
{
resolverAddr = v.substr(0, pos);
dnsport = std::atoi(v.substr(pos + 1).c_str());
}
m_LocalResolverAddr = Addr(resolverAddr, dnsport);
LogInfo(Name(), " local dns set to ", m_LocalResolverAddr);
}
if(k == "upstream-dns")
{
std::string resolverAddr = v;
uint16_t dnsport = 53;
auto pos = v.find(":");
if(pos != std::string::npos)
{
resolverAddr = v.substr(0, pos);
dnsport = std::atoi(v.substr(pos + 1).c_str());
}
m_UpstreamResolvers.emplace_back(resolverAddr, dnsport);
LogInfo(Name(), " adding upstream dns set to ", resolverAddr, ":",
dnsport);
}
if(k == "ifaddr")
{
auto pos = v.find("/");
if(pos == std::string::npos)
{
LogError(Name(), " ifaddr is not a cidr: ", v);
return false;
}
std::string nmask_str = v.substr(1 + pos);
std::string host_str = v.substr(0, pos);
// string, or just a plain char array?
strncpy(m_Tun.ifaddr, host_str.c_str(), sizeof(m_Tun.ifaddr) - 1);
m_Tun.netmask = std::atoi(nmask_str.c_str());
2018-11-15 13:47:46 +00:00
Addr ifaddr(host_str);
m_IfAddr = ifaddr.xtohl();
m_OurRange.netmask_bits = netmask_ipv4_bits(m_Tun.netmask);
m_OurRange.addr = m_IfAddr;
m_NextAddr = m_IfAddr;
m_HigestAddr = m_IfAddr | (~m_OurRange.netmask_bits);
LogInfo(Name(), " set ifaddr range to ", m_Tun.ifaddr, "/",
m_Tun.netmask, " lo=", m_IfAddr, " hi=", m_HigestAddr);
}
if(k == "ifname")
{
2018-12-28 15:10:05 +00:00
if(v.length() >= sizeof(m_Tun.ifname))
{
LogError(Name() + " ifname '", v, "' is too long");
2018-12-28 15:10:05 +00:00
return false;
}
strncpy(m_Tun.ifname, v.c_str(), sizeof(m_Tun.ifname) - 1);
LogInfo(Name(), " set ifname to ", m_Tun.ifname);
}
if(k == "exit-whitelist")
{
// add exit policy whitelist rule
// TODO: implement me
return true;
}
if(k == "exit-blacklist")
{
// add exit policy blacklist rule
// TODO: implement me
return true;
}
return true;
}
// Return the local ip mapped to service node `other`, creating both the
// mapping and an outbound snode session the first time it is seen.
huint32_t
ExitEndpoint::ObtainServiceNodeIP(const RouterID &other)
{
  const PubKey pubKey(other);
  const huint32_t ip = GetIPForIdent(pubKey);
  if(m_SNodeKeys.emplace(pubKey).second)
  {
    // this is a new service node: make an outbound session to them
    auto session = std::make_shared< exit::SNodeSession >(
        other,
        std::bind(&ExitEndpoint::QueueSNodePacket, this,
                  std::placeholders::_1, ip),
        GetRouter(), 2, 1, true, false);
    m_SNodeSessions.emplace(other, session);
  }
  return ip;
}
2018-11-14 12:23:08 +00:00
bool
ExitEndpoint::AllocateNewExit(const PubKey pk, const PathID_t &path,
bool wantInternet)
2018-11-14 12:23:08 +00:00
{
if(wantInternet && !m_PermitExit)
return false;
huint32_t ip = GetIPForIdent(pk);
if(GetRouter()->pathContext().TransitHopPreviousIsRouter(path,
pk.as_array()))
{
// we think this path belongs to a service node
// mark it as such so we don't make an outbound session to them
m_SNodeKeys.emplace(pk.as_array());
}
m_ActiveExits.emplace(pk,
std::make_unique< exit::Endpoint >(
pk, path, !wantInternet, ip, this));
2018-11-15 14:08:42 +00:00
m_Paths[path] = pk;
2018-11-15 21:47:05 +00:00
return HasLocalMappedAddrFor(pk);
2018-11-14 12:23:08 +00:00
}
// The configured name of this endpoint, used as a logging prefix.
std::string
ExitEndpoint::Name() const
{
  return m_Name;
}
2018-11-14 12:23:08 +00:00
void
ExitEndpoint::DelEndpointInfo(const PathID_t &path)
2018-11-14 12:23:08 +00:00
{
m_Paths.erase(path);
}
2018-11-14 18:02:27 +00:00
void
ExitEndpoint::RemoveExit(const exit::Endpoint *ep)
2018-11-14 18:02:27 +00:00
{
auto range = m_ActiveExits.equal_range(ep->PubKey());
auto itr = range.first;
while(itr != range.second)
{
2018-11-15 21:47:05 +00:00
if(itr->second->LocalPath() == ep->LocalPath())
2018-11-14 18:02:27 +00:00
{
itr = m_ActiveExits.erase(itr);
// now ep is gone af
return;
}
++itr;
}
}
// Periodic housekeeping: expire dead sessions, re-elect the freshest
// live exit per identity, and tick every session that remains.
void
ExitEndpoint::Tick(llarp_time_t now)
{
  {
    // expire or tick snode sessions
    auto itr = m_SNodeSessions.begin();
    while(itr != m_SNodeSessions.end())
    {
      if(itr->second->IsExpired(now))
        itr = m_SNodeSessions.erase(itr);
      else
      {
        itr->second->Tick(now);
        ++itr;
      }
    }
  }
  {
    // drop expired exit sessions
    auto itr = m_ActiveExits.begin();
    while(itr != m_ActiveExits.end())
    {
      if(itr->second->IsExpired(now))
        itr = m_ActiveExits.erase(itr);
      else
        ++itr;
    }
    // rebuild the chosen-exit table and tick each surviving session
    m_ChosenExits.clear();
    itr = m_ActiveExits.begin();
    while(itr != m_ActiveExits.end())
    {
      auto chosen = m_ChosenExits.find(itr->first);
      if(chosen != m_ChosenExits.end())
      {
        // replace the chosen exit when this session is newer and alive
        if(chosen->second->createdAt < itr->second->createdAt
           && !itr->second->LooksDead(now))
          m_ChosenExits[itr->first] = itr->second.get();
      }
      else if(!itr->second->LooksDead(now))
      {
        // first live session seen for this identity becomes the choice
        m_ChosenExits[itr->first] = itr->second.get();
      }
      // tick clears the tx/rx counters
      itr->second->Tick(now);
      ++itr;
    }
  }
}
}  // namespace handlers
}  // namespace llarp