2018-10-10 12:06:28 +00:00
|
|
|
#include <algorithm>
|
2022-07-16 00:41:14 +00:00
|
|
|
#include <iterator>
|
2021-03-15 16:01:19 +00:00
|
|
|
#include <variant>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include "tun.hpp"
|
2018-09-23 16:48:43 +00:00
|
|
|
#include <sys/types.h>
|
2018-09-25 08:31:29 +00:00
|
|
|
#ifndef _WIN32
|
2018-09-23 16:48:43 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <netdb.h>
|
2018-09-25 08:31:29 +00:00
|
|
|
#endif
|
2018-08-15 15:36:34 +00:00
|
|
|
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/dns/dns.hpp>
|
|
|
|
#include <llarp/ev/ev.hpp>
|
QUIC lokinet integration refactor
Refactors how quic packets get handled: the actual tunnels now live in
tunnel.hpp's TunnelManager which holds and manages all the quic<->tcp
tunnelling. service::Endpoint now holds a TunnelManager rather than a
quic::Server. We only need one quic server, but we need a separate quic
client instance per outgoing quic tunnel, and TunnelManager handles all
that glue now.
Adds QUIC packet handling to get to the right tunnel code. This
required multiplexing incoming quic packets, as follows:
Adds a very small quic tunnel packet header of 4 bytes:
[1, SPORT, ECN] for client->server packets, where SPORT is our
source "port" (really: just a uint16_t unique quic instance
identifier)
or
[2, DPORT, ECN] for server->client packets where the DPORT is the SPORT
from above.
(This also reworks ECN bits to get properly carried over lokinet.)
We don't need a destination/source port for the server-side because
there is only ever one quic server (and we know we're going to it when
the first byte of the header is 1).
Removes the config option for quic exposing ports; a full lokinet will
simply accept anything incoming on quic and tunnel it to the requested
port on the the local endpoint IP (this handler will come in a following
commit).
Replace ConvoTags with full addresses: we need to carry the port, as
well, which the ConvoTag can't give us, so change those to more general
SockAddrs from which we can extract both the ConvoTag *and* the port.
Add a pending connection queue along with new quic-side handlers to call
when a stream becomes available (TunnelManager uses this to wire up
pending incoming conns with quic streams as streams open up).
Completely get rid of tunnel_server/tunnel_client.cpp code; it is now
moved to tunnel.hpp.
Add listen()/forget() methods in TunnelManager for setting up quic
listening sockets (for liblokinet usage).
Add open()/close() methods in TunnelManager for spinning up new quic
clients for outgoing quic connections.
2021-03-23 19:26:32 +00:00
|
|
|
#include <llarp/net/net.hpp>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/router/abstractrouter.hpp>
|
2022-04-07 20:44:23 +00:00
|
|
|
#include <llarp/router/route_poker.hpp>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/service/context.hpp>
|
|
|
|
#include <llarp/service/outbound_context.hpp>
|
|
|
|
#include <llarp/service/endpoint_state.hpp>
|
|
|
|
#include <llarp/service/outbound_context.hpp>
|
|
|
|
#include <llarp/service/name.hpp>
|
QUIC lokinet integration refactor
Refactors how quic packets get handled: the actual tunnels now live in
tunnel.hpp's TunnelManager which holds and manages all the quic<->tcp
tunnelling. service::Endpoint now holds a TunnelManager rather than a
quic::Server. We only need one quic server, but we need a separate quic
client instance per outgoing quic tunnel, and TunnelManager handles all
that glue now.
Adds QUIC packet handling to get to the right tunnel code. This
required multiplexing incoming quic packets, as follows:
Adds a very small quic tunnel packet header of 4 bytes:
[1, SPORT, ECN] for client->server packets, where SPORT is our
source "port" (really: just a uint16_t unique quic instance
identifier)
or
[2, DPORT, ECN] for server->client packets where the DPORT is the SPORT
from above.
(This also reworks ECN bits to get properly carried over lokinet.)
We don't need a destination/source port for the server-side because
there is only ever one quic server (and we know we're going to it when
the first byte of the header is 1).
Removes the config option for quic exposing ports; a full lokinet will
simply accept anything incoming on quic and tunnel it to the requested
port on the the local endpoint IP (this handler will come in a following
commit).
Replace ConvoTags with full addresses: we need to carry the port, as
well, which the ConvoTag can't give us, so change those to more general
SockAddrs from which we can extract both the ConvoTag *and* the port.
Add a pending connection queue along with new quic-side handlers to call
when a stream becomes available (TunnelManager uses this to wire up
pending incoming conns with quic streams as streams open up).
Completely get rid of tunnel_server/tunnel_client.cpp code; it is now
moved to tunnel.hpp.
Add listen()/forget() methods in TunnelManager for setting up quic
listening sockets (for liblokinet usage).
Add open()/close() methods in TunnelManager for spinning up new quic
clients for outgoing quic connections.
2021-03-23 19:26:32 +00:00
|
|
|
#include <llarp/service/protocol_type.hpp>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/util/meta/memfn.hpp>
|
|
|
|
#include <llarp/nodedb.hpp>
|
QUIC lokinet integration refactor
Refactors how quic packets get handled: the actual tunnels now live in
tunnel.hpp's TunnelManager which holds and manages all the quic<->tcp
tunnelling. service::Endpoint now holds a TunnelManager rather than a
quic::Server. We only need one quic server, but we need a separate quic
client instance per outgoing quic tunnel, and TunnelManager handles all
that glue now.
Adds QUIC packet handling to get to the right tunnel code. This
required multiplexing incoming quic packets, as follows:
Adds a very small quic tunnel packet header of 4 bytes:
[1, SPORT, ECN] for client->server packets, where SPORT is our
source "port" (really: just a uint16_t unique quic instance
identifier)
or
[2, DPORT, ECN] for server->client packets where the DPORT is the SPORT
from above.
(This also reworks ECN bits to get properly carried over lokinet.)
We don't need a destination/source port for the server-side because
there is only ever one quic server (and we know we're going to it when
the first byte of the header is 1).
Removes the config option for quic exposing ports; a full lokinet will
simply accept anything incoming on quic and tunnel it to the requested
port on the the local endpoint IP (this handler will come in a following
commit).
Replace ConvoTags with full addresses: we need to carry the port, as
well, which the ConvoTag can't give us, so change those to more general
SockAddrs from which we can extract both the ConvoTag *and* the port.
Add a pending connection queue along with new quic-side handlers to call
when a stream becomes available (TunnelManager uses this to wire up
pending incoming conns with quic streams as streams open up).
Completely get rid of tunnel_server/tunnel_client.cpp code; it is now
moved to tunnel.hpp.
Add listen()/forget() methods in TunnelManager for setting up quic
listening sockets (for liblokinet usage).
Add open()/close() methods in TunnelManager for spinning up new quic
clients for outgoing quic connections.
2021-03-23 19:26:32 +00:00
|
|
|
#include <llarp/quic/tunnel.hpp>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/rpc/endpoint_rpc.hpp>
|
|
|
|
#include <llarp/util/str.hpp>
|
2022-09-10 01:33:47 +00:00
|
|
|
#include <llarp/util/logging/buffer.hpp>
|
2021-03-09 22:24:35 +00:00
|
|
|
#include <llarp/dns/srv_data.hpp>
|
2022-07-28 16:07:38 +00:00
|
|
|
#include <llarp/constants/net.hpp>
|
2022-04-07 20:44:23 +00:00
|
|
|
#include <llarp/constants/platform.hpp>
|
|
|
|
|
2022-02-26 19:10:27 +00:00
|
|
|
#include <oxenc/bt.h>
|
2022-04-28 15:09:51 +00:00
|
|
|
|
2018-08-15 15:36:34 +00:00
|
|
|
namespace llarp
|
|
|
|
{
|
2018-08-16 14:34:15 +00:00
|
|
|
namespace handlers
|
2018-08-15 15:36:34 +00:00
|
|
|
{
|
2022-10-27 22:11:11 +00:00
|
|
|
static auto logcat = log::Cat("tun");
|
|
|
|
|
2022-04-07 20:44:23 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::MaybeHookDNS(
|
2022-07-28 16:07:38 +00:00
|
|
|
std::shared_ptr<dns::PacketSource_Base> source,
|
2022-04-07 20:44:23 +00:00
|
|
|
const dns::Message& query,
|
|
|
|
const SockAddr& to,
|
|
|
|
const SockAddr& from)
|
|
|
|
{
|
|
|
|
if (not ShouldHookDNSMessage(query))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
auto job = std::make_shared<dns::QueryJob>(source, query, to, from);
|
2022-07-28 16:07:38 +00:00
|
|
|
if (HandleHookedDNSMessage(query, [job](auto msg) { job->SendReply(msg.ToBuffer()); }))
|
|
|
|
Router()->TriggerPump();
|
|
|
|
else
|
2022-04-07 20:44:23 +00:00
|
|
|
job->Cancel();
|
|
|
|
return true;
|
|
|
|
}
|
2022-07-28 16:07:38 +00:00
|
|
|
|
|
|
|
/// Intercepts DNS IP packets on platforms where binding to a low port isn't viable.
/// (windows/macos/ios/android ... aka everything that is not linux... funny that)
///
/// Instead of answering DNS over a bound UDP socket, replies are synthesized as
/// raw UDP/IP packets and handed back to the tun device via m_Reply.
class DnsInterceptor : public dns::PacketSource_Base
{
  // callback that injects a synthesized IP packet back toward the user
  std::function<void(net::IPPacket)> m_Reply;
  // our tun interface address; used by WouldLoop on apple to distinguish
  // tunnel-bound DNS from upstream-bound DNS
  net::ipaddr_t m_OurIP;
  // dns config snapshot; m_QueryBind is consulted to avoid looping our own
  // outbound queries back through the interceptor
  llarp::DnsConfig m_Config;

 public:
  explicit DnsInterceptor(
      std::function<void(net::IPPacket)> reply, net::ipaddr_t our_ip, llarp::DnsConfig conf)
      : m_Reply{std::move(reply)}, m_OurIP{std::move(our_ip)}, m_Config{std::move(conf)}
  {}

  ~DnsInterceptor() override = default;

  /// Send a DNS reply by wrapping it in a raw UDP packet (from -> to) and
  /// pushing it through m_Reply. Silently drops if packet construction fails.
  void
  SendTo(const SockAddr& to, const SockAddr& from, OwnedBuffer buf) const override
  {
    auto pkt = net::IPPacket::make_udp(from, to, std::move(buf));

    if (pkt.empty())
      return;
    m_Reply(std::move(pkt));
  }

  // no socket or timer state to tear down
  void
  Stop() override{};

  /// Not bound to any real socket, so there is no bound address to report.
  std::optional<SockAddr>
  BoundOn() const override
  {
    return std::nullopt;
  }

  /// Decide whether handling this packet would loop our own DNS traffic
  /// back into ourselves.
  bool
  WouldLoop(const SockAddr& to, const SockAddr& from) const override
  {
    if constexpr (platform::is_apple)
    {
      // DNS on Apple is a bit weird because in order for the NetworkExtension itself to send
      // data through the tunnel we have to proxy DNS requests through Apple APIs (and so our
      // actual upstream DNS won't be set in our resolvers, which is why the vanilla WouldLoop
      // won't work for us). However when active the mac also only queries the main tunnel IP
      // for DNS, so we consider anything else to be upstream-bound DNS to let it through the
      // tunnel.
      return to.getIP() != m_OurIP;
    }
    else if (auto maybe_addr = m_Config.m_QueryBind)
    {
      const auto& addr = *maybe_addr;
      // omit traffic to and from our dns socket
      return addr == to or addr == from;
    }
    return false;
  }
};
|
2019-11-29 18:37:19 +00:00
|
|
|
|
2022-04-07 20:44:23 +00:00
|
|
|
/// DNS server variant used when intercepting raw DNS packets off the tun
/// device (the "raw dns" mode). Instead of creating bound UDP sockets, it
/// hands out DnsInterceptor packet sources that synthesize replies as raw
/// IP packets written back through the owning TunEndpoint.
class TunDNS : public dns::Server
{
  // optional upstream query bind address, copied from the dns config
  std::optional<SockAddr> m_QueryBind;
  // the tun interface's own address (passed to each DnsInterceptor)
  net::ipaddr_t m_OurIP;
  // endpoint that owns us; used to write reply packets to the user
  TunEndpoint* const m_Endpoint;

 public:
  // the most recently created packet source; SetupDNS wraps this per-packet
  std::shared_ptr<dns::PacketSource_Base> PacketSource;

  virtual ~TunDNS() = default;

  // note: port 0 passed to the base server — we never bind a real socket here
  explicit TunDNS(TunEndpoint* ep, const llarp::DnsConfig& conf)
      : dns::Server{ep->Router()->loop(), conf, 0}
      , m_QueryBind{conf.m_QueryBind}
      , m_OurIP{ToNet(ep->GetIfAddr())}
      , m_Endpoint{ep}
  {}

  /// Create a DnsInterceptor packet source whose replies are injected back
  /// into the endpoint via HandleWriteIPPacket. The bound address argument is
  /// ignored — nothing is actually bound. Also retains the source in
  /// PacketSource for later per-packet wrapping.
  std::shared_ptr<dns::PacketSource_Base>
  MakePacketSourceOn(const SockAddr&, const llarp::DnsConfig& conf) override
  {
    auto ptr = std::make_shared<DnsInterceptor>(
        [ep = m_Endpoint](auto pkt) {
          ep->HandleWriteIPPacket(pkt.ConstBuffer(), pkt.srcv6(), pkt.dstv6(), 0);
        },
        m_OurIP,
        conf);
    PacketSource = ptr;
    return ptr;
  }
};
|
|
|
|
|
2021-01-11 23:13:22 +00:00
|
|
|
/// Construct the tun endpoint; all IP packets that no registered UDP handler
/// claims are funneled into HandleGotUserPacket via the packet router.
TunEndpoint::TunEndpoint(AbstractRouter* r, service::Context* parent)
    : service::Endpoint{r, parent}
{
  m_PacketRouter = std::make_shared<vpn::PacketRouter>(
      [this](net::IPPacket pkt) { HandleGotUserPacket(std::move(pkt)); });
}
|
|
|
|
|
|
|
|
/// Set up DNS for this endpoint.
///
/// Two modes:
///  - raw dns (m_raw_dns): a TunDNS instance intercepts UDP port 53 packets
///    off the tun device via the packet router (no bound socket), and on
///    windows an extra raw packet-io pump feeds DNS packets into the router.
///  - normal: a regular dns::Server bound on the tun interface.
void
TunEndpoint::SetupDNS()
{
  const auto& info = GetVPNInterface()->Info();
  if (m_DnsConfig.m_raw_dns)
  {
    auto dns = std::make_shared<TunDNS>(this, m_DnsConfig);
    m_DNS = dns;

    // steal udp/53 off the tun device and feed it to the dns server
    m_PacketRouter->AddUDPHandler(huint16_t{53}, [this, dns](net::IPPacket pkt) {
      auto dns_pkt_src = dns->PacketSource;
      // if the packet carries its own reply function, wrap the source so the
      // reply goes back the same way the packet came in
      if (const auto& reply = pkt.reply)
        dns_pkt_src = std::make_shared<dns::PacketSource_Wrapper>(dns_pkt_src, reply);
      if (dns->MaybeHandlePacket(
              std::move(dns_pkt_src), pkt.dst(), pkt.src(), *pkt.L4OwnedBuffer()))
        return;

      // not dns we handle; treat as ordinary user traffic
      HandleGotUserPacket(std::move(pkt));
    });
  }
  else
    m_DNS = std::make_shared<dns::Server>(Loop(), m_DnsConfig, info.index);

  m_DNS->AddResolver(weak_from_this());
  m_DNS->Start();

  if (m_DnsConfig.m_raw_dns)
  {
    if (auto vpn = Router()->GetVPNPlatform())
    {
      // get the first local address we know of
      std::optional<SockAddr> localaddr;
      for (auto res : m_DNS->GetAllResolvers())
      {
        if (auto ptr = res.lock())
        {
          localaddr = ptr->GetLocalAddr();
          if (localaddr)
            break;
        }
      }
      if (platform::is_windows)
      {
        // on windows we additionally pump raw DNS packets from the platform
        // packet io into the packet router on every event-loop tick
        auto dns_io = vpn->create_packet_io(0, localaddr);
        Router()->loop()->add_ticker([r = Router(), dns_io, handler = m_PacketRouter]() {
          net::IPPacket pkt = dns_io->ReadNextPacket();
          while (not pkt.empty())
          {
            handler->HandleIPPacket(std::move(pkt));
            pkt = dns_io->ReadNextPacket();
          }
        });
        m_RawDNS = dns_io;
      }
    }

    if (m_RawDNS)
      m_RawDNS->Start();
  }
}
|
2018-08-16 14:34:15 +00:00
|
|
|
|
2019-02-11 17:14:43 +00:00
|
|
|
/// Build a status object for RPC introspection: interface name/range, DNS
/// resolver lists, and the per-IP activity/remote-address map.
util::StatusObject
TunEndpoint::ExtractStatus() const
{
  auto obj = service::Endpoint::ExtractStatus();
  obj["ifaddr"] = m_OurRange.ToString();
  obj["ifname"] = m_IfName;

  std::vector<std::string> upstreamRes;
  for (const auto& ent : m_DnsConfig.m_upstreamDNS)
    upstreamRes.emplace_back(ent.ToString());
  // NOTE: "ustreamResolvers" is a historical typo kept for backwards
  // compatibility with existing consumers; the correctly spelled key is
  // emitted alongside it.
  obj["ustreamResolvers"] = upstreamRes;
  obj["upstreamResolvers"] = upstreamRes;

  std::vector<std::string> localRes;
  for (const auto& ent : m_DnsConfig.m_bind)
    localRes.emplace_back(ent.ToString());
  obj["localResolvers"] = localRes;

  // for backwards compat
  if (not m_DnsConfig.m_bind.empty())
    obj["localResolver"] = localRes[0];

  util::StatusObject ips{};
  for (const auto& item : m_IPActivity)
  {
    util::StatusObject ipObj{{"lastActive", to_json(item.second)}};
    std::string remoteStr;
    AlignedBuffer<32> addr = m_IPToAddr.at(item.first);
    // the same 32-byte buffer is either a snode RouterID or a .loki address,
    // disambiguated by the m_SNodes flag
    if (m_SNodes.at(addr))
      remoteStr = RouterID(addr.as_array()).ToString();
    else
      remoteStr = service::Address(addr.as_array()).ToString();
    ipObj["remote"] = remoteStr;
    std::string ipaddr = item.first.ToString();
    ips[ipaddr] = ipObj;
  }
  obj["addrs"] = ips;
  obj["ourIP"] = m_OurIP.ToString();
  obj["nextIP"] = m_NextIP.ToString();
  obj["maxIP"] = m_MaxIP.ToString();
  return obj;
}
|
|
|
|
|
2021-02-05 21:48:57 +00:00
|
|
|
/// Called when the system resumes from sleep/suspend: reset the DNS server's
/// state so stale sockets/queries from before the freeze are discarded.
void
TunEndpoint::Thaw()
{
  if (m_DNS)
    m_DNS->Reset();
}
|
|
|
|
|
2022-09-16 23:55:59 +00:00
|
|
|
void
|
2021-09-01 18:10:08 +00:00
|
|
|
TunEndpoint::ReconfigureDNS(std::vector<SockAddr> servers)
|
|
|
|
{
|
2022-09-16 19:27:12 +00:00
|
|
|
if (m_DNS)
|
|
|
|
{
|
|
|
|
for (auto weak : m_DNS->GetAllResolvers())
|
|
|
|
{
|
|
|
|
if (auto ptr = weak.lock())
|
2022-09-16 23:55:59 +00:00
|
|
|
ptr->ResetResolver(servers);
|
2022-09-16 19:27:12 +00:00
|
|
|
}
|
|
|
|
}
|
2021-09-01 18:10:08 +00:00
|
|
|
}
|
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|
/// Apply network + dns configuration to this endpoint.
///
/// Handles (in order): introset reachability, auth policy (file or RPC
/// backed), dns/traffic policy snapshots, path alignment timeout, static
/// address mappings, interface name/range selection (auto-picking free ones
/// when unset), loading the persisted addr-map file, and wiring the QUIC
/// tunnel's listen callback. Returns false if a configured address mapping
/// fails; throws if no free interface name/range can be found.
bool
TunEndpoint::Configure(const NetworkConfig& conf, const DnsConfig& dnsConf)
{
  if (conf.m_reachable)
  {
    m_PublishIntroSet = true;
    LogInfo(Name(), " setting to be reachable by default");
  }
  else
  {
    m_PublishIntroSet = false;
    LogInfo(Name(), " setting to be not reachable by default");
  }

  if (conf.m_AuthType == service::AuthType::eAuthTypeFile)
  {
    m_AuthPolicy = service::MakeFileAuthPolicy(m_router, conf.m_AuthFiles, conf.m_AuthFileType);
  }
  else if (conf.m_AuthType != service::AuthType::eAuthTypeNone)
  {
    // RPC-backed auth; url/method may legitimately be empty if unset
    std::string url, method;
    if (conf.m_AuthUrl.has_value() and conf.m_AuthMethod.has_value())
    {
      url = *conf.m_AuthUrl;
      method = *conf.m_AuthMethod;
    }
    auto auth = std::make_shared<rpc::EndpointAuthRPC>(
        url,
        method,
        conf.m_AuthWhitelist,
        conf.m_AuthStaticTokens,
        Router()->lmq(),
        shared_from_this());
    auth->Start();
    m_AuthPolicy = std::move(auth);
  }

  m_DnsConfig = dnsConf;
  m_TrafficPolicy = conf.m_TrafficPolicy;
  m_OwnedRanges = conf.m_OwnedRanges;

  m_BaseV6Address = conf.m_baseV6Address;

  if (conf.m_PathAlignmentTimeout)
  {
    m_PathAlignmentTimeout = *conf.m_PathAlignmentTimeout;
  }
  else
    m_PathAlignmentTimeout = service::Endpoint::PathAlignmentTimeout();

  // statically configured name -> ip mappings
  for (const auto& item : conf.m_mapAddrs)
  {
    if (not MapAddress(item.second, item.first, false))
      return false;
  }

  m_IfName = conf.m_ifname;
  if (m_IfName.empty())
  {
    const auto maybe = m_router->Net().FindFreeTun();
    if (not maybe.has_value())
      throw std::runtime_error("cannot find free interface name");
    m_IfName = *maybe;
  }

  m_OurRange = conf.m_ifaddr;
  // an all-zero range address means "unset"; pick a free range
  if (!m_OurRange.addr.h)
  {
    const auto maybe = m_router->Net().FindFreeRange();
    if (not maybe.has_value())
    {
      throw std::runtime_error("cannot find free address range");
    }
    m_OurRange = *maybe;
  }

  m_OurIP = m_OurRange.addr;
  m_UseV6 = false;

  m_PersistAddrMapFile = conf.m_AddrMapPersistFile;
  if (m_PersistAddrMapFile)
  {
    const auto& file = *m_PersistAddrMapFile;
    if (fs::exists(file))
    {
      // only trust the persisted map if it was written recently (within one
      // minute) — an old file likely predates the previous run's shutdown
      bool shouldLoadFile = true;
      {
        constexpr auto LastModifiedWindow = 1min;
        const auto lastmodified = fs::last_write_time(file);
        const auto now = decltype(lastmodified)::clock::now();
        if (now < lastmodified or now - lastmodified > LastModifiedWindow)
        {
          shouldLoadFile = false;
        }
      }
      std::vector<char> data;
      if (auto maybe = util::OpenFileStream<fs::ifstream>(file, std::ios_base::binary);
          maybe and shouldLoadFile)
      {
        LogInfo(Name(), " loading address map file from ", file);
        maybe->seekg(0, std::ios_base::end);
        const size_t len = maybe->tellg();
        maybe->seekg(0, std::ios_base::beg);
        data.resize(len);
        LogInfo(Name(), " reading ", len, " bytes");
        maybe->read(data.data(), data.size());
      }
      else
      {
        if (shouldLoadFile)
        {
          LogInfo(Name(), " address map file ", file, " does not exist, so we won't load it");
        }
        else
          LogInfo(Name(), " address map file ", file, " not loaded because it's stale");
      }
      if (not data.empty())
      {
        // file format: bencoded dict of ip-string -> remote-address-string
        std::string_view bdata{data.data(), data.size()};
        LogDebug(Name(), " parsing address map data: ", bdata);
        const auto parsed = oxenc::bt_deserialize<oxenc::bt_dict>(bdata);
        for (const auto& [key, value] : parsed)
        {
          huint128_t ip{};
          if (not ip.FromString(key))
          {
            LogWarn(Name(), " malformed IP in addr map data: ", key);
            continue;
          }
          if (m_OurIP == ip)
            continue;
          if (not m_OurRange.Contains(ip))
          {
            LogWarn(Name(), " out of range IP in addr map data: ", ip);
            continue;
          }
          EndpointBase::AddressVariant_t addr;

          if (const auto* str = std::get_if<std::string>(&value))
          {
            if (auto maybe = service::ParseAddress(*str))
            {
              addr = *maybe;
            }
            else
            {
              LogWarn(Name(), " invalid address in addr map: ", *str);
              continue;
            }
          }
          else
          {
            LogWarn(Name(), " invalid first entry in addr map, not a string");
            continue;
          }
          // restore the ip<->address mapping for whichever variant we parsed
          if (const auto* loki = std::get_if<service::Address>(&addr))
          {
            m_IPToAddr.emplace(ip, loki->data());
            m_AddrToIP.emplace(loki->data(), ip);
            m_SNodes[*loki] = false;
            LogInfo(Name(), " remapped ", ip, " to ", *loki);
          }
          if (const auto* snode = std::get_if<RouterID>(&addr))
          {
            m_IPToAddr.emplace(ip, snode->data());
            m_AddrToIP.emplace(snode->data(), ip);
            m_SNodes[*snode] = true;
            LogInfo(Name(), " remapped ", ip, " to ", *snode);
          }
          // keep the ip allocator ahead of every restored mapping
          if (m_NextIP < ip)
            m_NextIP = ip;
          // make sure we dont unmap this guy
          MarkIPActive(ip);
        }
      }
    }
    else
    {
      LogInfo(
          Name(), " skipping loading addr map at ", file, " as it does not currently exist");
    }
  }

  // let the quic tunnel learn the local socket address for any incoming port
  if (auto* quic = GetQUICTunnel())
  {
    quic->listen([this](std::string_view, uint16_t port) {
      return llarp::SockAddr{net::TruncateV6(GetIfAddr()), huint16_t{port}};
    });
  }
  return Endpoint::Configure(conf, dnsConf);
}
|
|
|
|
|
2018-11-14 12:23:08 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
TunEndpoint::HasLocalIP(const huint128_t& ip) const
|
2018-11-14 12:23:08 +00:00
|
|
|
{
|
|
|
|
return m_IPToAddr.find(ip) != m_IPToAddr.end();
|
|
|
|
}
|
|
|
|
|
2018-12-15 16:56:35 +00:00
|
|
|
void
|
2021-11-12 19:34:03 +00:00
|
|
|
TunEndpoint::Pump(llarp_time_t now)
|
2021-06-04 11:15:00 +00:00
|
|
|
{
|
2021-01-11 23:13:22 +00:00
|
|
|
// flush network to user
|
|
|
|
while (not m_NetworkToUserPktQueue.empty())
|
|
|
|
{
|
|
|
|
m_NetIf->WritePacket(m_NetworkToUserPktQueue.top().pkt);
|
|
|
|
m_NetworkToUserPktQueue.pop();
|
|
|
|
}
|
2021-11-12 19:34:03 +00:00
|
|
|
|
|
|
|
service::Endpoint::Pump(now);
|
2018-12-15 16:56:35 +00:00
|
|
|
}
|
|
|
|
|
2019-03-20 15:48:23 +00:00
|
|
|
static bool
|
2020-04-07 18:38:56 +00:00
|
|
|
is_random_snode(const dns::Message& msg)
|
2019-03-20 15:48:23 +00:00
|
|
|
{
|
2019-04-26 12:11:34 +00:00
|
|
|
return msg.questions[0].IsName("random.snode");
|
2019-03-20 15:48:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
2020-04-07 18:38:56 +00:00
|
|
|
is_localhost_loki(const dns::Message& msg)
|
2019-03-20 15:48:23 +00:00
|
|
|
{
|
2020-08-31 23:25:58 +00:00
|
|
|
return msg.questions[0].IsLocalhost();
|
2019-03-20 15:48:23 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
/// Strip all response sections from a DNS message and clear the NXDOMAIN
/// flag, leaving an empty shell suitable for filling in a fresh answer.
/// Returns the same message by reference for chaining.
static dns::Message&
clear_dns_message(dns::Message& msg)
{
  msg.answers.clear();
  msg.authorities.clear();
  msg.additional.clear();
  msg.hdr_fields &= ~dns::flags_RCODENameError;
  return msg;
}
|
|
|
|
|
2021-03-15 16:01:19 +00:00
|
|
|
/// Reverse-lookup a tunnel IP to the remote it maps to.
/// Returns nullopt when unmapped; otherwise a RouterID for snode mappings or
/// a service::Address for .loki mappings (disambiguated via m_SNodes).
std::optional<std::variant<service::Address, RouterID>>
TunEndpoint::ObtainAddrForIP(huint128_t ip) const
{
  const auto found = m_IPToAddr.find(ip);
  if (found == m_IPToAddr.end())
    return std::nullopt;
  const auto& entry = found->second;
  if (m_SNodes.at(entry))
    return RouterID{entry.as_array()};
  return service::Address{entry.as_array()};
}
|
|
|
|
|
2018-12-03 22:22:59 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
TunEndpoint::HandleHookedDNSMessage(dns::Message msg, std::function<void(dns::Message)> reply)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2021-05-01 12:55:54 +00:00
|
|
|
auto ReplyToSNodeDNSWhenReady = [this, reply](RouterID snode, auto msg, bool isV6) -> bool {
|
|
|
|
return EnsurePathToSNode(
|
2021-03-16 19:50:37 +00:00
|
|
|
snode,
|
2021-05-01 12:55:54 +00:00
|
|
|
[this, snode, msg, reply, isV6](
|
|
|
|
const RouterID&, exit::BaseSession_ptr s, [[maybe_unused]] service::ConvoTag tag) {
|
|
|
|
SendDNSReply(snode, s, msg, reply, isV6);
|
2021-03-16 19:50:37 +00:00
|
|
|
});
|
2020-03-12 12:19:37 +00:00
|
|
|
};
|
2021-05-11 09:12:02 +00:00
|
|
|
auto ReplyToLokiDNSWhenReady = [this, reply, timeout = PathAlignmentTimeout()](
|
2020-04-07 18:38:56 +00:00
|
|
|
service::Address addr, auto msg, bool isV6) -> bool {
|
2020-03-12 12:19:37 +00:00
|
|
|
using service::Address;
|
|
|
|
using service::OutboundContext;
|
2021-06-14 13:49:54 +00:00
|
|
|
if (HasInboundConvo(addr))
|
2021-06-14 12:17:44 +00:00
|
|
|
{
|
2021-06-14 13:49:54 +00:00
|
|
|
// if we have an inbound convo to this address don't mark as outbound so we don't have a
|
|
|
|
// state race this codepath is hit when an application verifies that reverse and forward
|
|
|
|
// dns records match for an inbound session
|
2021-06-14 12:17:44 +00:00
|
|
|
SendDNSReply(addr, this, msg, reply, isV6);
|
|
|
|
return true;
|
|
|
|
}
|
2021-06-05 12:57:01 +00:00
|
|
|
MarkAddressOutbound(addr);
|
2021-05-01 12:55:54 +00:00
|
|
|
return EnsurePathToService(
|
2020-03-12 12:19:37 +00:00
|
|
|
addr,
|
2021-05-01 12:55:54 +00:00
|
|
|
[this, addr, msg, reply, isV6](const Address&, OutboundContext* ctx) {
|
|
|
|
SendDNSReply(addr, ctx, msg, reply, isV6);
|
2020-03-12 12:19:37 +00:00
|
|
|
},
|
2021-05-01 20:25:32 +00:00
|
|
|
timeout);
|
2020-03-12 12:19:37 +00:00
|
|
|
};
|
2020-08-31 20:07:17 +00:00
|
|
|
|
2021-03-20 18:30:18 +00:00
|
|
|
auto ReplyToDNSWhenReady = [ReplyToLokiDNSWhenReady, ReplyToSNodeDNSWhenReady](
|
|
|
|
auto addr, auto msg, bool isV6) {
|
|
|
|
if (auto ptr = std::get_if<RouterID>(&addr))
|
|
|
|
{
|
|
|
|
ReplyToSNodeDNSWhenReady(*ptr, msg, isV6);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (auto ptr = std::get_if<service::Address>(&addr))
|
|
|
|
{
|
|
|
|
ReplyToLokiDNSWhenReady(*ptr, msg, isV6);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
};
|
2021-03-19 20:06:03 +00:00
|
|
|
|
2021-05-11 09:12:02 +00:00
|
|
|
auto ReplyToLokiSRVWhenReady = [this, reply, timeout = PathAlignmentTimeout()](
|
2021-05-01 20:25:32 +00:00
|
|
|
service::Address addr, auto msg) -> bool {
|
2020-08-31 20:07:17 +00:00
|
|
|
using service::Address;
|
|
|
|
using service::OutboundContext;
|
2021-06-14 12:17:44 +00:00
|
|
|
// TODO: how do we handle SRV record lookups for inbound sessions?
|
2021-06-05 12:57:01 +00:00
|
|
|
MarkAddressOutbound(addr);
|
2021-05-01 12:55:54 +00:00
|
|
|
return EnsurePathToService(
|
2020-08-31 20:07:17 +00:00
|
|
|
addr,
|
2021-05-01 12:55:54 +00:00
|
|
|
[msg, addr, reply](const Address&, OutboundContext* ctx) {
|
2020-08-31 20:07:17 +00:00
|
|
|
if (ctx == nullptr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
const auto& introset = ctx->GetCurrentIntroSet();
|
2020-08-31 23:25:58 +00:00
|
|
|
msg->AddSRVReply(introset.GetMatchingSRVRecords(addr.subdomain));
|
2020-08-31 20:07:17 +00:00
|
|
|
reply(*msg);
|
|
|
|
},
|
2021-05-01 20:25:32 +00:00
|
|
|
timeout);
|
2020-08-31 20:07:17 +00:00
|
|
|
};
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.answers.size() > 0)
|
2020-02-12 20:43:37 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
const auto& answer = msg.answers[0];
|
|
|
|
if (answer.HasCNameForTLD(".snode"))
|
2020-02-13 15:44:43 +00:00
|
|
|
{
|
|
|
|
llarp_buffer_t buf(answer.rData);
|
2022-07-18 20:05:39 +00:00
|
|
|
auto qname = dns::DecodeName(&buf, true);
|
|
|
|
if (not qname)
|
2020-02-13 15:44:43 +00:00
|
|
|
return false;
|
|
|
|
RouterID addr;
|
2022-07-18 20:05:39 +00:00
|
|
|
if (not addr.FromString(*qname))
|
2020-02-13 15:44:43 +00:00
|
|
|
return false;
|
2020-04-07 18:38:56 +00:00
|
|
|
auto replyMsg = std::make_shared<dns::Message>(clear_dns_message(msg));
|
2020-03-12 12:19:37 +00:00
|
|
|
return ReplyToSNodeDNSWhenReady(addr, std::move(replyMsg), false);
|
2020-02-13 15:44:43 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (answer.HasCNameForTLD(".loki"))
|
2020-02-12 20:43:37 +00:00
|
|
|
{
|
|
|
|
llarp_buffer_t buf(answer.rData);
|
2022-07-18 20:05:39 +00:00
|
|
|
auto qname = dns::DecodeName(&buf, true);
|
|
|
|
if (not qname)
|
2020-02-12 20:43:37 +00:00
|
|
|
return false;
|
2020-09-17 19:18:08 +00:00
|
|
|
|
2020-02-12 20:43:37 +00:00
|
|
|
service::Address addr;
|
2022-07-18 20:05:39 +00:00
|
|
|
if (not addr.FromString(*qname))
|
2020-02-12 20:43:37 +00:00
|
|
|
return false;
|
2020-09-17 19:18:08 +00:00
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
auto replyMsg = std::make_shared<dns::Message>(clear_dns_message(msg));
|
2020-03-12 12:19:37 +00:00
|
|
|
return ReplyToLokiDNSWhenReady(addr, replyMsg, false);
|
2020-02-12 20:43:37 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.questions.size() != 1)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
|
|
|
llarp::LogWarn("bad number of dns questions: ", msg.questions.size());
|
|
|
|
return false;
|
|
|
|
}
|
2020-09-17 19:18:08 +00:00
|
|
|
std::string qname = msg.questions[0].Name();
|
2020-09-19 14:38:57 +00:00
|
|
|
const auto nameparts = split(qname, ".");
|
|
|
|
std::string lnsName;
|
|
|
|
if (nameparts.size() >= 2 and ends_with(qname, ".loki"))
|
|
|
|
{
|
|
|
|
lnsName = nameparts[nameparts.size() - 2];
|
|
|
|
lnsName += ".loki"sv;
|
|
|
|
}
|
2020-10-12 16:18:46 +00:00
|
|
|
if (msg.questions[0].qtype == dns::qTypeTXT)
|
|
|
|
{
|
|
|
|
RouterID snode;
|
|
|
|
if (snode.FromString(qname))
|
|
|
|
{
|
|
|
|
m_router->LookupRouter(snode, [reply, msg = std::move(msg)](const auto& found) mutable {
|
|
|
|
if (found.empty())
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-07-16 00:41:14 +00:00
|
|
|
std::string recs;
|
2020-10-12 16:18:46 +00:00
|
|
|
for (const auto& rc : found)
|
2022-07-16 00:41:14 +00:00
|
|
|
recs += rc.ToTXTRecord();
|
|
|
|
msg.AddTXTReply(std::move(recs));
|
2020-10-12 16:18:46 +00:00
|
|
|
}
|
|
|
|
reply(msg);
|
|
|
|
});
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
else if (msg.questions[0].IsLocalhost() and msg.questions[0].HasSubdomains())
|
|
|
|
{
|
|
|
|
const auto subdomain = msg.questions[0].Subdomains();
|
|
|
|
if (subdomain == "exit")
|
|
|
|
{
|
|
|
|
if (HasExit())
|
|
|
|
{
|
2022-07-16 00:41:14 +00:00
|
|
|
std::string s;
|
|
|
|
m_ExitMap.ForEachEntry([&s](const auto& range, const auto& exit) {
|
|
|
|
fmt::format_to(std::back_inserter(s), "{}={}; ", range, exit);
|
2020-10-12 16:18:46 +00:00
|
|
|
});
|
2022-07-16 00:41:14 +00:00
|
|
|
msg.AddTXTReply(std::move(s));
|
2020-10-12 16:18:46 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (subdomain == "netid")
|
|
|
|
{
|
2022-07-16 00:41:14 +00:00
|
|
|
msg.AddTXTReply(fmt::format("netid={};", m_router->rc().netID));
|
2020-10-12 16:18:46 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
2020-02-12 20:43:37 +00:00
|
|
|
|
2020-10-12 16:18:46 +00:00
|
|
|
reply(msg);
|
|
|
|
}
|
|
|
|
else if (msg.questions[0].qtype == dns::qTypeMX)
|
2018-12-07 21:52:19 +00:00
|
|
|
{
|
|
|
|
// mx record
|
2019-02-05 14:23:51 +00:00
|
|
|
service::Address addr;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (addr.FromString(qname, ".loki") || addr.FromString(qname, ".snode")
|
|
|
|
|| is_random_snode(msg) || is_localhost_loki(msg))
|
2021-02-25 15:35:50 +00:00
|
|
|
{
|
2018-12-07 21:52:19 +00:00
|
|
|
msg.AddMXReply(qname, 1);
|
2021-02-25 15:35:50 +00:00
|
|
|
}
|
|
|
|
else if (service::NameIsValid(lnsName))
|
|
|
|
{
|
2021-03-19 19:30:09 +00:00
|
|
|
LookupNameAsync(lnsName, [msg, lnsName, reply](auto maybe) mutable {
|
2021-03-03 15:37:31 +00:00
|
|
|
if (maybe.has_value())
|
|
|
|
{
|
2021-03-20 15:47:36 +00:00
|
|
|
var::visit([&](auto&& value) { msg.AddMXReply(value.ToString(), 1); }, *maybe);
|
2021-03-03 15:37:31 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
reply(msg);
|
|
|
|
});
|
2021-03-19 19:30:09 +00:00
|
|
|
return true;
|
2021-02-25 15:35:50 +00:00
|
|
|
}
|
2018-12-07 21:52:19 +00:00
|
|
|
else
|
|
|
|
msg.AddNXReply();
|
2018-12-07 22:08:23 +00:00
|
|
|
reply(msg);
|
2018-12-07 21:52:19 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (msg.questions[0].qtype == dns::qTypeCNAME)
|
2019-01-10 15:49:08 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (is_random_snode(msg))
|
2019-01-10 15:49:08 +00:00
|
|
|
{
|
|
|
|
RouterID random;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (Router()->GetRandomGoodRouter(random))
|
2020-03-12 12:19:37 +00:00
|
|
|
{
|
2019-01-10 15:49:08 +00:00
|
|
|
msg.AddCNAMEReply(random.ToString(), 1);
|
2020-03-12 12:19:37 +00:00
|
|
|
}
|
2019-01-10 15:49:08 +00:00
|
|
|
else
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
2020-10-31 15:59:03 +00:00
|
|
|
else if (msg.questions[0].IsLocalhost() and msg.questions[0].HasSubdomains())
|
|
|
|
{
|
|
|
|
const auto subdomain = msg.questions[0].Subdomains();
|
|
|
|
if (subdomain == "exit" and HasExit())
|
|
|
|
{
|
|
|
|
m_ExitMap.ForEachEntry(
|
|
|
|
[&msg](const auto&, const auto& exit) { msg.AddCNAMEReply(exit.ToString(), 1); });
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (is_localhost_loki(msg))
|
2019-01-28 15:26:35 +00:00
|
|
|
{
|
|
|
|
size_t counter = 0;
|
|
|
|
context->ForEachService(
|
2020-04-07 18:38:56 +00:00
|
|
|
[&](const std::string&, const std::shared_ptr<service::Endpoint>& service) -> bool {
|
2019-07-05 14:41:26 +00:00
|
|
|
const service::Address addr = service->GetIdentity().pub.Addr();
|
2019-01-28 15:26:35 +00:00
|
|
|
msg.AddCNAMEReply(addr.ToString(), 1);
|
|
|
|
++counter;
|
|
|
|
return true;
|
|
|
|
});
|
2020-04-07 18:38:56 +00:00
|
|
|
if (counter == 0)
|
2019-01-28 15:26:35 +00:00
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
2019-01-10 15:49:08 +00:00
|
|
|
else
|
|
|
|
msg.AddNXReply();
|
2019-02-05 14:03:38 +00:00
|
|
|
reply(msg);
|
2019-01-10 15:49:08 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeAAAA)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2020-03-09 19:40:43 +00:00
|
|
|
const bool isV6 = msg.questions[0].qtype == dns::qTypeAAAA;
|
2019-06-11 16:44:05 +00:00
|
|
|
const bool isV4 = msg.questions[0].qtype == dns::qTypeA;
|
2018-12-03 22:22:59 +00:00
|
|
|
llarp::service::Address addr;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (isV6 && !SupportsV6())
|
2020-03-09 20:03:44 +00:00
|
|
|
{ // empty reply but not a NXDOMAIN so that client can retry IPv4
|
|
|
|
msg.AddNSReply("localhost.loki.");
|
2020-03-09 19:40:43 +00:00
|
|
|
}
|
2019-02-05 03:19:06 +00:00
|
|
|
// on MacOS this is a typeA query
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (is_random_snode(msg))
|
2019-02-05 03:19:06 +00:00
|
|
|
{
|
2019-02-05 21:04:30 +00:00
|
|
|
RouterID random;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (Router()->GetRandomGoodRouter(random))
|
2020-03-12 12:19:37 +00:00
|
|
|
{
|
2019-02-05 21:04:30 +00:00
|
|
|
msg.AddCNAMEReply(random.ToString(), 1);
|
2020-04-07 18:38:56 +00:00
|
|
|
return ReplyToSNodeDNSWhenReady(random, std::make_shared<dns::Message>(msg), isV6);
|
2020-03-12 12:19:37 +00:00
|
|
|
}
|
2019-02-05 21:04:30 +00:00
|
|
|
else
|
|
|
|
msg.AddNXReply();
|
2019-02-05 03:19:06 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (is_localhost_loki(msg))
|
2018-12-13 00:03:19 +00:00
|
|
|
{
|
2020-10-31 15:59:03 +00:00
|
|
|
const bool lookingForExit = msg.questions[0].Subdomains() == "exit";
|
|
|
|
huint128_t ip = GetIfAddr();
|
|
|
|
if (ip.h)
|
|
|
|
{
|
|
|
|
if (lookingForExit)
|
|
|
|
{
|
|
|
|
if (HasExit())
|
|
|
|
{
|
|
|
|
m_ExitMap.ForEachEntry(
|
|
|
|
[&msg](const auto&, const auto& exit) { msg.AddCNAMEReply(exit.ToString()); });
|
|
|
|
msg.AddINReply(ip, isV6);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg.AddCNAMEReply(m_Identity.pub.Name(), 1);
|
|
|
|
msg.AddINReply(ip, isV6);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2018-12-13 00:03:19 +00:00
|
|
|
msg.AddNXReply();
|
2020-10-31 15:59:03 +00:00
|
|
|
}
|
2018-12-13 00:03:19 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (addr.FromString(qname, ".loki"))
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (isV4 && SupportsV6())
|
2018-12-07 20:56:01 +00:00
|
|
|
{
|
2019-06-11 18:23:53 +00:00
|
|
|
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
|
2019-06-11 16:44:05 +00:00
|
|
|
}
|
2018-12-07 20:56:01 +00:00
|
|
|
else
|
2019-01-07 22:15:31 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
return ReplyToLokiDNSWhenReady(addr, std::make_shared<dns::Message>(msg), isV6);
|
2019-01-07 22:15:31 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (addr.FromString(qname, ".snode"))
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (isV4 && SupportsV6())
|
2019-06-12 13:48:14 +00:00
|
|
|
{
|
|
|
|
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2020-03-12 12:19:37 +00:00
|
|
|
return ReplyToSNodeDNSWhenReady(
|
2020-04-07 18:38:56 +00:00
|
|
|
addr.as_array(), std::make_shared<dns::Message>(msg), isV6);
|
2019-06-12 13:48:14 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-09-19 14:38:57 +00:00
|
|
|
else if (service::NameIsValid(lnsName))
|
2020-09-17 19:18:08 +00:00
|
|
|
{
|
2021-03-19 19:30:09 +00:00
|
|
|
LookupNameAsync(
|
2020-09-19 14:38:57 +00:00
|
|
|
lnsName,
|
2020-09-19 13:29:36 +00:00
|
|
|
[msg = std::make_shared<dns::Message>(msg),
|
|
|
|
name = Name(),
|
2020-09-19 14:38:57 +00:00
|
|
|
lnsName,
|
2020-09-19 13:29:36 +00:00
|
|
|
isV6,
|
|
|
|
reply,
|
2021-03-19 20:06:03 +00:00
|
|
|
ReplyToDNSWhenReady](auto maybe) {
|
2020-09-17 19:18:08 +00:00
|
|
|
if (not maybe.has_value())
|
|
|
|
{
|
2020-09-19 14:38:57 +00:00
|
|
|
LogWarn(name, " lns name ", lnsName, " not resolved");
|
2020-09-17 19:18:08 +00:00
|
|
|
msg->AddNXReply();
|
|
|
|
reply(*msg);
|
|
|
|
return;
|
|
|
|
}
|
2021-03-19 20:06:03 +00:00
|
|
|
ReplyToDNSWhenReady(*maybe, msg, isV6);
|
2020-09-17 19:18:08 +00:00
|
|
|
});
|
2021-03-19 19:30:09 +00:00
|
|
|
return true;
|
2020-09-17 19:18:08 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
else
|
2018-12-04 16:16:43 +00:00
|
|
|
msg.AddNXReply();
|
|
|
|
|
|
|
|
reply(msg);
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (msg.questions[0].qtype == dns::qTypePTR)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2018-12-04 16:16:43 +00:00
|
|
|
// reverse dns
|
2022-07-18 20:05:39 +00:00
|
|
|
if (auto ip = dns::DecodePTR(msg.questions[0].qname))
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2022-07-18 20:05:39 +00:00
|
|
|
if (auto maybe = ObtainAddrForIP(*ip))
|
|
|
|
{
|
|
|
|
var::visit([&msg](auto&& result) { msg.AddAReply(result.ToString()); }, *maybe);
|
|
|
|
reply(msg);
|
|
|
|
return true;
|
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2021-03-15 16:01:19 +00:00
|
|
|
|
2018-12-04 16:16:43 +00:00
|
|
|
msg.AddNXReply();
|
|
|
|
reply(msg);
|
2018-12-03 22:22:59 +00:00
|
|
|
return true;
|
|
|
|
}
|
2020-08-31 20:07:17 +00:00
|
|
|
else if (msg.questions[0].qtype == dns::qTypeSRV)
|
|
|
|
{
|
2022-04-16 16:41:34 +00:00
|
|
|
auto srv_for = msg.questions[0].Subdomains();
|
|
|
|
auto name = msg.questions[0].qname;
|
2020-08-31 20:07:17 +00:00
|
|
|
if (is_localhost_loki(msg))
|
|
|
|
{
|
2022-04-16 16:41:34 +00:00
|
|
|
msg.AddSRVReply(introSet().GetMatchingSRVRecords(srv_for));
|
2020-08-31 20:07:17 +00:00
|
|
|
reply(msg);
|
|
|
|
return true;
|
|
|
|
}
|
2022-04-16 16:41:34 +00:00
|
|
|
LookupServiceAsync(
|
|
|
|
name,
|
|
|
|
srv_for,
|
|
|
|
[reply, msg = std::make_shared<dns::Message>(std::move(msg))](auto records) {
|
|
|
|
if (records.empty())
|
|
|
|
{
|
|
|
|
msg->AddNXReply();
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
msg->AddSRVReply(records);
|
|
|
|
}
|
|
|
|
reply(*msg);
|
|
|
|
});
|
|
|
|
return true;
|
2020-08-31 20:07:17 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
else
|
2018-12-04 16:16:43 +00:00
|
|
|
{
|
|
|
|
msg.AddNXReply();
|
|
|
|
reply(msg);
|
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-09-19 15:22:28 +00:00
|
|
|
void
|
|
|
|
TunEndpoint::ResetInternalState()
|
2019-05-07 17:46:38 +00:00
|
|
|
{
|
|
|
|
service::Endpoint::ResetInternalState();
|
|
|
|
}
|
|
|
|
|
2019-06-11 16:44:05 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::SupportsV6() const
|
|
|
|
{
|
|
|
|
return m_UseV6;
|
|
|
|
}
|
|
|
|
|
2019-03-20 03:18:38 +00:00
|
|
|
// FIXME: pass in which question it should be addressing
|
2018-12-03 22:22:59 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
TunEndpoint::ShouldHookDNSMessage(const dns::Message& msg) const
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
|
|
|
llarp::service::Address addr;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.questions.size() == 1)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2019-05-01 13:40:10 +00:00
|
|
|
/// hook every .loki
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.questions[0].HasTLD(".loki"))
|
2018-12-13 00:03:19 +00:00
|
|
|
return true;
|
2019-05-01 13:40:10 +00:00
|
|
|
/// hook every .snode
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.questions[0].HasTLD(".snode"))
|
2018-12-03 22:22:59 +00:00
|
|
|
return true;
|
2019-01-10 15:49:08 +00:00
|
|
|
// hook any ranges we own
|
2020-04-07 18:38:56 +00:00
|
|
|
if (msg.questions[0].qtype == llarp::dns::qTypePTR)
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2022-07-18 20:05:39 +00:00
|
|
|
if (auto ip = dns::DecodePTR(msg.questions[0].qname))
|
|
|
|
return m_OurRange.Contains(*ip);
|
|
|
|
return false;
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
for (const auto& answer : msg.answers)
|
2020-02-12 20:43:37 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (answer.HasCNameForTLD(".loki"))
|
2020-02-12 20:43:37 +00:00
|
|
|
return true;
|
2020-04-07 18:38:56 +00:00
|
|
|
if (answer.HasCNameForTLD(".snode"))
|
2020-02-12 20:43:37 +00:00
|
|
|
return true;
|
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-22 15:52:10 +00:00
|
|
|
bool
|
2020-04-07 18:38:56 +00:00
|
|
|
TunEndpoint::MapAddress(const service::Address& addr, huint128_t ip, bool SNode)
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
|
|
|
auto itr = m_IPToAddr.find(ip);
|
2020-04-07 18:38:56 +00:00
|
|
|
if (itr != m_IPToAddr.end())
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
llarp::LogWarn(
|
|
|
|
ip, " already mapped to ", service::Address(itr->second.as_array()).ToString());
|
2018-08-22 15:52:10 +00:00
|
|
|
return false;
|
|
|
|
}
|
2018-11-03 13:19:18 +00:00
|
|
|
llarp::LogInfo(Name() + " map ", addr.ToString(), " to ", ip);
|
2018-10-19 15:04:14 +00:00
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
m_IPToAddr[ip] = addr;
|
2019-01-02 01:04:08 +00:00
|
|
|
m_AddrToIP[addr] = ip;
|
2020-04-07 18:38:56 +00:00
|
|
|
m_SNodes[addr] = SNode;
|
2018-09-10 11:08:09 +00:00
|
|
|
MarkIPActiveForever(ip);
|
2021-06-05 13:28:42 +00:00
|
|
|
MarkAddressOutbound(addr);
|
2018-08-22 15:52:10 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:07:37 +00:00
|
|
|
std::string
|
|
|
|
TunEndpoint::GetIfName() const
|
|
|
|
{
|
2020-09-28 22:43:31 +00:00
|
|
|
#ifdef _WIN32
|
|
|
|
return net::TruncateV6(GetIfAddr()).ToString();
|
|
|
|
#else
|
2020-08-21 15:07:37 +00:00
|
|
|
return m_IfName;
|
2020-09-28 22:43:31 +00:00
|
|
|
#endif
|
2020-08-21 15:07:37 +00:00
|
|
|
}
|
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::Start()
|
|
|
|
{
|
2022-04-07 20:44:23 +00:00
|
|
|
if (not Endpoint::Start())
|
2018-08-16 14:34:15 +00:00
|
|
|
return false;
|
2018-08-21 18:39:18 +00:00
|
|
|
return SetupNetworking();
|
2018-08-16 14:34:15 +00:00
|
|
|
}
|
|
|
|
|
2018-12-02 18:07:07 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::IsSNode() const
|
2018-11-30 14:14:30 +00:00
|
|
|
{
|
|
|
|
// TODO : implement me
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::SetupTun()
|
|
|
|
{
|
2021-01-11 23:13:22 +00:00
|
|
|
m_NextIP = m_OurIP;
|
|
|
|
m_MaxIP = m_OurRange.HighestAddr();
|
|
|
|
llarp::LogInfo(Name(), " set ", m_IfName, " to have address ", m_OurIP);
|
|
|
|
llarp::LogInfo(Name(), " allocated up to ", m_MaxIP, " on range ", m_OurRange);
|
2018-09-23 16:48:43 +00:00
|
|
|
|
2021-01-11 23:13:22 +00:00
|
|
|
const service::Address ourAddr = m_Identity.pub.Addr();
|
|
|
|
|
|
|
|
if (not MapAddress(ourAddr, GetIfAddr(), false))
|
2019-10-04 18:10:58 +00:00
|
|
|
{
|
2021-01-11 23:13:22 +00:00
|
|
|
return false;
|
2019-10-04 18:10:58 +00:00
|
|
|
}
|
2021-01-11 23:13:22 +00:00
|
|
|
|
|
|
|
vpn::InterfaceInfo info;
|
2022-07-28 16:07:38 +00:00
|
|
|
info.addrs.emplace_back(m_OurRange);
|
2021-03-18 15:59:02 +00:00
|
|
|
|
|
|
|
if (m_BaseV6Address)
|
|
|
|
{
|
|
|
|
IPRange v6range = m_OurRange;
|
2021-03-18 17:12:35 +00:00
|
|
|
v6range.addr = (*m_BaseV6Address) | m_OurRange.addr;
|
2021-03-18 15:59:02 +00:00
|
|
|
LogInfo(Name(), " using v6 range: ", v6range);
|
2022-07-28 16:07:38 +00:00
|
|
|
info.addrs.emplace_back(v6range, AF_INET6);
|
2021-03-18 15:59:02 +00:00
|
|
|
}
|
2021-02-16 15:59:18 +00:00
|
|
|
|
2021-01-11 23:13:22 +00:00
|
|
|
info.ifname = m_IfName;
|
2022-04-07 20:44:23 +00:00
|
|
|
|
2021-01-11 23:13:22 +00:00
|
|
|
LogInfo(Name(), " setting up network...");
|
|
|
|
|
|
|
|
try
|
2019-06-11 16:44:05 +00:00
|
|
|
{
|
2022-07-28 16:07:38 +00:00
|
|
|
m_NetIf = Router()->GetVPNPlatform()->CreateInterface(std::move(info), Router());
|
2019-06-11 16:44:05 +00:00
|
|
|
}
|
2021-01-11 23:13:22 +00:00
|
|
|
catch (std::exception& ex)
|
2019-06-11 16:44:05 +00:00
|
|
|
{
|
2021-01-11 23:13:22 +00:00
|
|
|
LogError(Name(), " failed to set up network interface: ", ex.what());
|
2019-10-04 18:10:58 +00:00
|
|
|
return false;
|
2018-09-23 16:48:43 +00:00
|
|
|
}
|
2022-07-28 16:07:38 +00:00
|
|
|
|
|
|
|
m_IfName = m_NetIf->Info().ifname;
|
2021-01-11 23:13:22 +00:00
|
|
|
LogInfo(Name(), " got network interface ", m_IfName);
|
2018-09-23 16:48:43 +00:00
|
|
|
|
2022-07-28 16:07:38 +00:00
|
|
|
auto handle_packet = [netif = m_NetIf, pkt_router = m_PacketRouter](auto pkt) {
|
|
|
|
pkt.reply = [netif](auto pkt) { netif->WritePacket(std::move(pkt)); };
|
|
|
|
pkt_router->HandleIPPacket(std::move(pkt));
|
|
|
|
};
|
|
|
|
|
|
|
|
if (not Router()->loop()->add_network_interface(m_NetIf, std::move(handle_packet)))
|
2019-11-29 00:37:58 +00:00
|
|
|
{
|
2021-01-11 23:13:22 +00:00
|
|
|
LogError(Name(), " failed to add network interface");
|
2019-11-29 00:37:58 +00:00
|
|
|
return false;
|
|
|
|
}
|
2022-04-07 20:44:23 +00:00
|
|
|
|
2021-09-03 19:17:28 +00:00
|
|
|
m_OurIPv6 = llarp::huint128_t{
|
|
|
|
llarp::uint128_t{0xfd2e'6c6f'6b69'0000, llarp::net::TruncateV6(m_OurRange.addr).h}};
|
2022-04-07 20:44:23 +00:00
|
|
|
|
|
|
|
if constexpr (not llarp::platform::is_apple)
|
2021-02-16 15:59:18 +00:00
|
|
|
{
|
2022-04-07 20:44:23 +00:00
|
|
|
if (auto maybe = m_router->Net().GetInterfaceIPv6Address(m_IfName))
|
|
|
|
{
|
|
|
|
m_OurIPv6 = *maybe;
|
|
|
|
LogInfo(Name(), " has ipv6 address ", m_OurIPv6);
|
|
|
|
}
|
2021-02-16 15:59:18 +00:00
|
|
|
}
|
2021-01-11 23:13:22 +00:00
|
|
|
|
2022-07-28 16:07:38 +00:00
|
|
|
LogInfo(Name(), " setting up dns...");
|
|
|
|
SetupDNS();
|
|
|
|
Loop()->call_soon([this]() { m_router->routePoker()->SetDNSMode(false); });
|
2019-11-29 00:37:58 +00:00
|
|
|
return HasAddress(ourAddr);
|
2018-08-16 14:34:15 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 18:38:56 +00:00
|
|
|
std::unordered_map<std::string, std::string>
|
2019-04-22 14:00:59 +00:00
|
|
|
TunEndpoint::NotifyParams() const
|
|
|
|
{
|
|
|
|
auto env = Endpoint::NotifyParams();
|
|
|
|
env.emplace("IP_ADDR", m_OurIP.ToString());
|
|
|
|
env.emplace("IF_ADDR", m_OurRange.ToString());
|
2021-01-11 23:13:22 +00:00
|
|
|
env.emplace("IF_NAME", m_IfName);
|
2019-05-06 12:42:21 +00:00
|
|
|
std::string strictConnect;
|
2020-04-07 18:38:56 +00:00
|
|
|
for (const auto& addr : m_StrictConnectAddrs)
|
2022-07-16 00:41:14 +00:00
|
|
|
strictConnect += addr.ToString() + " ";
|
2019-05-06 12:42:21 +00:00
|
|
|
env.emplace("STRICT_CONNECT_ADDRS", strictConnect);
|
2019-04-22 14:00:59 +00:00
|
|
|
return env;
|
|
|
|
}
|
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::SetupNetworking()
|
|
|
|
{
|
2018-08-17 19:49:58 +00:00
|
|
|
llarp::LogInfo("Set Up networking for ", Name());
|
2022-04-07 20:44:23 +00:00
|
|
|
return SetupTun();
|
2018-08-16 14:34:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
TunEndpoint::Tick(llarp_time_t now)
|
|
|
|
{
|
2019-11-20 19:45:23 +00:00
|
|
|
Endpoint::Tick(now);
|
2018-08-16 14:34:15 +00:00
|
|
|
}
|
|
|
|
|
2018-12-24 16:09:05 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::Stop()
|
|
|
|
{
|
2022-07-28 16:07:38 +00:00
|
|
|
// stop vpn tunnel
|
|
|
|
if (m_NetIf)
|
|
|
|
m_NetIf->Stop();
|
|
|
|
if (m_RawDNS)
|
|
|
|
m_RawDNS->Stop();
|
2021-06-17 16:05:50 +00:00
|
|
|
// save address map if applicable
|
2022-07-28 16:07:38 +00:00
|
|
|
if (m_PersistAddrMapFile and not platform::is_android)
|
2021-06-17 16:05:50 +00:00
|
|
|
{
|
|
|
|
const auto& file = *m_PersistAddrMapFile;
|
|
|
|
LogInfo(Name(), " saving address map to ", file);
|
|
|
|
if (auto maybe = util::OpenFileStream<fs::ofstream>(file, std::ios_base::binary))
|
|
|
|
{
|
|
|
|
std::map<std::string, std::string> addrmap;
|
|
|
|
for (const auto& [ip, addr] : m_IPToAddr)
|
|
|
|
{
|
|
|
|
if (not m_SNodes.at(addr))
|
|
|
|
{
|
|
|
|
const service::Address a{addr.as_array()};
|
|
|
|
if (HasInboundConvo(a))
|
|
|
|
addrmap[ip.ToString()] = a.ToString();
|
|
|
|
}
|
|
|
|
}
|
2022-02-17 18:44:31 +00:00
|
|
|
const auto data = oxenc::bt_serialize(addrmap);
|
2021-06-17 16:05:50 +00:00
|
|
|
maybe->write(data.data(), data.size());
|
|
|
|
}
|
|
|
|
}
|
2022-04-07 20:44:23 +00:00
|
|
|
if (m_DNS)
|
|
|
|
m_DNS->Stop();
|
2018-12-24 16:09:05 +00:00
|
|
|
return llarp::service::Endpoint::Stop();
|
|
|
|
}
|
|
|
|
|
2021-12-01 18:12:44 +00:00
|
|
|
std::optional<service::Address>
|
|
|
|
TunEndpoint::ObtainExitAddressFor(
|
|
|
|
huint128_t ip,
|
|
|
|
std::function<service::Address(std::unordered_set<service::Address>)> exitSelectionStrat)
|
|
|
|
{
|
|
|
|
// is it already mapped? return the mapping
|
|
|
|
if (auto itr = m_ExitIPToExitAddress.find(ip); itr != m_ExitIPToExitAddress.end())
|
|
|
|
return itr->second;
|
2022-07-28 16:07:38 +00:00
|
|
|
|
|
|
|
const auto& net = m_router->Net();
|
|
|
|
const bool is_bogon = net.IsBogonIP(ip);
|
2021-12-01 18:12:44 +00:00
|
|
|
// build up our candidates to choose
|
2022-07-28 16:07:38 +00:00
|
|
|
|
2021-12-01 18:12:44 +00:00
|
|
|
std::unordered_set<service::Address> candidates;
|
|
|
|
for (const auto& entry : m_ExitMap.FindAllEntries(ip))
|
|
|
|
{
|
2022-07-28 16:07:38 +00:00
|
|
|
// in the event the exit's range is a bogon range, make sure the ip is located in that range
|
|
|
|
// to allow it
|
|
|
|
if ((is_bogon and net.IsBogonRange(entry.first) and entry.first.Contains(ip))
|
|
|
|
or entry.first.Contains(ip))
|
2021-12-01 18:12:44 +00:00
|
|
|
candidates.emplace(entry.second);
|
|
|
|
}
|
|
|
|
// no candidates? bail.
|
|
|
|
if (candidates.empty())
|
|
|
|
return std::nullopt;
|
|
|
|
if (not exitSelectionStrat)
|
|
|
|
{
|
|
|
|
// default strat to random choice
|
|
|
|
exitSelectionStrat = [](auto candidates) {
|
|
|
|
auto itr = candidates.begin();
|
|
|
|
std::advance(itr, llarp::randint() % candidates.size());
|
|
|
|
return *itr;
|
|
|
|
};
|
|
|
|
}
|
|
|
|
// map the exit and return the endpoint we mapped it to
|
|
|
|
return m_ExitIPToExitAddress.emplace(ip, exitSelectionStrat(candidates)).first->second;
|
|
|
|
}
|
|
|
|
|
2018-08-22 15:52:10 +00:00
|
|
|
void
|
2021-11-12 19:34:03 +00:00
|
|
|
TunEndpoint::HandleGotUserPacket(net::IPPacket pkt)
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
2021-11-12 19:34:03 +00:00
|
|
|
huint128_t dst, src;
|
|
|
|
if (pkt.IsV4())
|
|
|
|
{
|
|
|
|
dst = pkt.dst4to6();
|
|
|
|
src = pkt.src4to6();
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
dst = pkt.dstv6();
|
|
|
|
src = pkt.srcv6();
|
|
|
|
}
|
2021-01-11 23:13:22 +00:00
|
|
|
|
2022-07-07 19:50:26 +00:00
|
|
|
if constexpr (llarp::platform::is_apple)
|
|
|
|
{
|
|
|
|
if (dst == m_OurIP)
|
|
|
|
{
|
|
|
|
HandleWriteIPPacket(pkt.ConstBuffer(), src, dst, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-12 19:34:03 +00:00
|
|
|
if (m_state->m_ExitEnabled)
|
|
|
|
{
|
|
|
|
dst = net::ExpandV4(net::TruncateV6(dst));
|
|
|
|
}
|
|
|
|
auto itr = m_IPToAddr.find(dst);
|
|
|
|
if (itr == m_IPToAddr.end())
|
|
|
|
{
|
2021-12-01 18:12:44 +00:00
|
|
|
service::Address addr{};
|
|
|
|
|
|
|
|
if (auto maybe = ObtainExitAddressFor(dst))
|
|
|
|
addr = *maybe;
|
|
|
|
else
|
2018-08-22 15:52:10 +00:00
|
|
|
{
|
2021-11-12 19:34:03 +00:00
|
|
|
// send icmp unreachable as we dont have any exits for this ip
|
|
|
|
if (const auto icmp = pkt.MakeICMPUnreachable())
|
|
|
|
HandleWriteIPPacket(icmp->ConstBuffer(), dst, src, 0);
|
2021-12-01 18:12:44 +00:00
|
|
|
|
2019-06-11 16:44:05 +00:00
|
|
|
return;
|
2018-08-22 15:52:10 +00:00
|
|
|
}
|
2022-07-28 16:07:38 +00:00
|
|
|
std::function<void(void)> extra_cb;
|
|
|
|
if (not HasFlowToService(addr))
|
|
|
|
{
|
|
|
|
extra_cb = [poker = Router()->routePoker()]() { poker->Up(); };
|
|
|
|
}
|
2021-11-12 19:34:03 +00:00
|
|
|
pkt.ZeroSourceAddress();
|
|
|
|
MarkAddressOutbound(addr);
|
|
|
|
EnsurePathToService(
|
|
|
|
addr,
|
2022-07-28 16:07:38 +00:00
|
|
|
[pkt, extra_cb, this](service::Address addr, service::OutboundContext* ctx) {
|
2021-11-12 19:34:03 +00:00
|
|
|
if (ctx)
|
2021-06-05 12:57:01 +00:00
|
|
|
{
|
2022-07-28 16:07:38 +00:00
|
|
|
if (extra_cb)
|
|
|
|
extra_cb();
|
2021-11-12 19:34:03 +00:00
|
|
|
ctx->SendPacketToRemote(pkt.ConstBuffer(), service::ProtocolType::Exit);
|
|
|
|
Router()->TriggerPump();
|
|
|
|
return;
|
2021-06-05 12:57:01 +00:00
|
|
|
}
|
2021-11-12 19:34:03 +00:00
|
|
|
LogWarn("cannot ensure path to exit ", addr, " so we drop some packets");
|
2021-06-02 19:28:03 +00:00
|
|
|
},
|
|
|
|
PathAlignmentTimeout());
|
2021-11-12 19:34:03 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
std::variant<service::Address, RouterID> to;
|
|
|
|
service::ProtocolType type;
|
|
|
|
if (m_SNodes.at(itr->second))
|
|
|
|
{
|
|
|
|
to = RouterID{itr->second.as_array()};
|
|
|
|
type = service::ProtocolType::TrafficV4;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
to = service::Address{itr->second.as_array()};
|
|
|
|
type = m_state->m_ExitEnabled and src != m_OurIP ? service::ProtocolType::Exit
|
|
|
|
: pkt.ServiceProtocol();
|
|
|
|
}
|
|
|
|
|
|
|
|
// prepare packet for insertion into network
|
|
|
|
// this includes clearing IP addresses, recalculating checksums, etc
|
|
|
|
// this does not happen for exits because the point is they don't rewrite addresses
|
|
|
|
if (type != service::ProtocolType::Exit)
|
|
|
|
{
|
|
|
|
if (pkt.IsV4())
|
|
|
|
pkt.UpdateIPv4Address({0}, {0});
|
|
|
|
else
|
|
|
|
pkt.UpdateIPv6Address({0}, {0});
|
|
|
|
}
|
|
|
|
// try sending it on an existing convotag
|
|
|
|
// this succeds for inbound convos, probably.
|
|
|
|
if (auto maybe = GetBestConvoTagFor(to))
|
|
|
|
{
|
|
|
|
if (SendToOrQueue(*maybe, pkt.ConstBuffer(), type))
|
|
|
|
{
|
|
|
|
MarkIPActive(dst);
|
|
|
|
Router()->TriggerPump();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// try establishing a path to this guy
|
|
|
|
// will fail if it's an inbound convo
|
|
|
|
EnsurePathTo(
|
|
|
|
to,
|
|
|
|
[pkt, type, dst, to, this](auto maybe) {
|
|
|
|
if (not maybe)
|
|
|
|
{
|
|
|
|
var::visit(
|
|
|
|
[this](auto&& addr) {
|
|
|
|
LogWarn(Name(), " failed to ensure path to ", addr, " no convo tag found");
|
|
|
|
},
|
|
|
|
to);
|
|
|
|
}
|
|
|
|
if (SendToOrQueue(*maybe, pkt.ConstBuffer(), type))
|
|
|
|
{
|
|
|
|
MarkIPActive(dst);
|
|
|
|
Router()->TriggerPump();
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
var::visit(
|
|
|
|
[this](auto&& addr) {
|
|
|
|
LogWarn(Name(), " failed to send to ", addr, ", SendToOrQueue failed");
|
|
|
|
},
|
|
|
|
to);
|
|
|
|
}
|
|
|
|
},
|
|
|
|
PathAlignmentTimeout());
|
2018-08-22 15:52:10 +00:00
|
|
|
}
|
|
|
|
|
2021-04-14 15:07:06 +00:00
|
|
|
bool
|
|
|
|
TunEndpoint::ShouldAllowTraffic(const net::IPPacket& pkt) const
|
|
|
|
{
|
|
|
|
if (const auto exitPolicy = GetExitPolicy())
|
|
|
|
{
|
|
|
|
if (not exitPolicy->AllowsTraffic(pkt))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-09-18 11:08:47 +00:00
|
|
|
// Handle a packet arriving from the network side of a convo identified by
// `tag`. Dispatches QUIC-tunneled data to the quic tunnel manager; otherwise
// rewrites IP source/destination according to whether we are acting as an
// exit, a client of an exit, or a plain snapp endpoint, then hands the packet
// to HandleWriteIPPacket for delivery to the user-facing interface.
// Returns false when the packet is dropped (unknown convo, bad packet,
// disallowed by exit policy, or mismatched exit mapping).
bool
TunEndpoint::HandleInboundPacket(
    const service::ConvoTag tag,
    const llarp_buffer_t& buf,
    service::ProtocolType t,
    uint64_t seqno)
{
  LogTrace("Inbound ", t, " packet (", buf.sz, "B) on convo ", tag);
  if (t == service::ProtocolType::QUIC)
  {
    auto* quic = GetQUICTunnel();
    if (!quic)
    {
      LogWarn("incoming quic packet but this endpoint is not quic capable; dropping");
      return false;
    }
    // the quic tunnel header is 4 bytes; anything shorter cannot be valid
    if (buf.sz < 4)
    {
      LogWarn("invalid incoming quic packet, dropping");
      return false;
    }
    LogInfo("tag active T=", tag);
    quic->receive_packet(tag, buf);
    return true;
  }

  // only plain v4/v6 traffic and exit traffic are handled past this point
  if (t != service::ProtocolType::TrafficV4 && t != service::ProtocolType::TrafficV6
      && t != service::ProtocolType::Exit)
    return false;
  // resolve who this convo belongs to; drop traffic from unknown convos
  std::variant<service::Address, RouterID> addr;
  if (auto maybe = GetEndpointWithConvoTag(tag))
  {
    addr = *maybe;
  }
  else
    return false;
  huint128_t src, dst;

  net::IPPacket pkt;
  if (not pkt.Load(buf))
    return false;

  if (m_state->m_ExitEnabled)
  {
    // exit side from exit

    // check packet against exit policy and if as needed
    if (not ShouldAllowTraffic(pkt))
      return false;

    // map the remote sender onto one of our interface IPs
    src = ObtainIPForAddr(addr);
    if (t == service::ProtocolType::Exit)
    {
      if (pkt.IsV4())
        dst = pkt.dst4to6();
      else if (pkt.IsV6())
      {
        dst = pkt.dstv6();
        src = net::ExpandV4Lan(net::TruncateV6(src));
      }
    }
    else
    {
      // non exit traffic on exit
      dst = m_OurIP;
    }
  }
  else if (t == service::ProtocolType::Exit)
  {
    // client side exit traffic from exit
    if (pkt.IsV4())
    {
      dst = m_OurIP;
      src = pkt.src4to6();
    }
    else if (pkt.IsV6())
    {
      dst = m_OurIPv6;
      src = pkt.srcv6();
    }
    // find what exit we think this should be for
    service::Address fromAddr{};
    if (const auto* ptr = std::get_if<service::Address>(&addr))
    {
      fromAddr = *ptr;
    }
    else // don't allow snode
      return false;
    // make sure the mapping matches: the packet's source IP must belong to
    // the exit we actually received it from, otherwise an exit could spoof
    // traffic for ranges it does not own
    if (auto itr = m_ExitIPToExitAddress.find(src); itr != m_ExitIPToExitAddress.end())
    {
      if (itr->second != fromAddr)
        return false;
    }
    else
      return false;
  }
  else
  {
    // snapp traffic
    src = ObtainIPForAddr(addr);
    dst = m_OurIP;
  }
  HandleWriteIPPacket(buf, src, dst, seqno);
  return true;
}
|
|
|
|
|
|
|
|
bool
|
2020-10-08 11:59:01 +00:00
|
|
|
TunEndpoint::HandleWriteIPPacket(
|
|
|
|
const llarp_buffer_t& b, huint128_t src, huint128_t dst, uint64_t seqno)
|
2018-08-18 14:01:21 +00:00
|
|
|
{
|
2019-02-03 00:48:10 +00:00
|
|
|
ManagedBuffer buf(b);
|
2020-10-08 11:59:01 +00:00
|
|
|
WritePacket write;
|
|
|
|
write.seqno = seqno;
|
|
|
|
auto& pkt = write.pkt;
|
|
|
|
// load
|
|
|
|
if (!pkt.Load(buf))
|
2021-03-02 18:18:22 +00:00
|
|
|
{
|
2020-10-08 11:59:01 +00:00
|
|
|
return false;
|
2021-03-02 18:18:22 +00:00
|
|
|
}
|
2020-10-08 11:59:01 +00:00
|
|
|
if (pkt.IsV4())
|
|
|
|
{
|
|
|
|
pkt.UpdateIPv4Address(xhtonl(net::TruncateV6(src)), xhtonl(net::TruncateV6(dst)));
|
|
|
|
}
|
|
|
|
else if (pkt.IsV6())
|
|
|
|
{
|
|
|
|
pkt.UpdateIPv6Address(src, dst);
|
|
|
|
}
|
|
|
|
m_NetworkToUserPktQueue.push(std::move(write));
|
2021-11-12 19:34:03 +00:00
|
|
|
// wake up so we ensure that all packets are written to user
|
|
|
|
Router()->TriggerPump();
|
2020-10-08 11:59:01 +00:00
|
|
|
return true;
|
2018-08-20 19:12:12 +00:00
|
|
|
}
|
2018-10-19 15:04:14 +00:00
|
|
|
|
2019-06-11 16:44:05 +00:00
|
|
|
// The address of our tun interface is simply the locally assigned IP.
huint128_t
TunEndpoint::GetIfAddr() const
{
  return m_OurIP;
}
|
2018-08-20 19:12:12 +00:00
|
|
|
|
2019-06-11 16:44:05 +00:00
|
|
|
// Map a remote (service address or snode router id) onto an interface IP.
// Returns the previously allocated IP when one exists, otherwise allocates
// the next free IP in our range; when the range is exhausted, the least
// recently active mapping is evicted and its IP is reused. The returned
// IP's activity timestamp is refreshed.
huint128_t
TunEndpoint::ObtainIPForAddr(std::variant<service::Address, RouterID> addr)
{
  llarp_time_t now = Now();
  huint128_t nextIP = {0};
  AlignedBuffer<32> ident{};
  bool snode = false;

  // extract the 32-byte identity from whichever alternative is held
  var::visit([&ident](auto&& val) { ident = val.data(); }, addr);

  if (std::get_if<RouterID>(&addr))
  {
    snode = true;
  }

  {
    // previously allocated address
    auto itr = m_AddrToIP.find(ident);
    if (itr != m_AddrToIP.end())
    {
      // mark ip active so this mapping is not an eviction candidate
      MarkIPActive(itr->second);
      return itr->second;
    }
  }
  // allocate new address
  if (m_NextIP < m_MaxIP)
  {
    // skip over any IPs that are already mapped
    do
    {
      nextIP = ++m_NextIP;
    } while (m_IPToAddr.find(nextIP) != m_IPToAddr.end() && m_NextIP < m_MaxIP);
    if (nextIP < m_MaxIP)
    {
      m_AddrToIP[ident] = nextIP;
      m_IPToAddr[nextIP] = ident;
      m_SNodes[ident] = snode;
      var::visit(
          [&](auto&& remote) { llarp::LogInfo(Name(), " mapped ", remote, " to ", nextIP); },
          addr);
      MarkIPActive(nextIP);
      return nextIP;
    }
  }

  // we are full
  // expire least active ip
  // TODO: prevent DoS
  //
  // BUGFIX: the previous scan compared an age (now - stamp) against a raw
  // timestamp, so a recently active mapping could be evicted ahead of an
  // older one. Instead find the smallest activity timestamp that is not in
  // the future; mappings pinned via MarkIPActiveForever (stamp > now) are
  // never selected.
  huint128_t oldestIP{0};
  llarp_time_t oldestStamp = now;

  for (const auto& [ip, lastActive] : m_IPActivity)
  {
    if (lastActive < oldestStamp)
    {
      oldestIP = ip;
      oldestStamp = lastActive;
    }
  }
  // remap address
  m_IPToAddr[oldestIP] = ident;
  m_AddrToIP[ident] = oldestIP;
  m_SNodes[ident] = snode;
  nextIP = oldestIP;

  // mark ip active
  m_IPActivity[nextIP] = std::max(m_IPActivity[nextIP], now);

  return nextIP;
}
|
|
|
|
|
|
|
|
bool
|
2019-06-11 16:44:05 +00:00
|
|
|
TunEndpoint::HasRemoteForIP(huint128_t ip) const
|
2018-08-20 19:12:12 +00:00
|
|
|
{
|
|
|
|
return m_IPToAddr.find(ip) != m_IPToAddr.end();
|
2018-08-18 14:01:21 +00:00
|
|
|
}
|
|
|
|
|
2018-08-21 18:17:16 +00:00
|
|
|
void
|
2019-06-11 16:44:05 +00:00
|
|
|
TunEndpoint::MarkIPActive(huint128_t ip)
|
2018-08-21 18:17:16 +00:00
|
|
|
{
|
2019-06-11 21:28:55 +00:00
|
|
|
llarp::LogDebug(Name(), " address ", ip, " is active");
|
2018-10-29 16:48:36 +00:00
|
|
|
m_IPActivity[ip] = std::max(Now(), m_IPActivity[ip]);
|
2018-08-21 18:17:16 +00:00
|
|
|
}
|
|
|
|
|
2018-09-10 11:08:09 +00:00
|
|
|
void
|
2019-06-11 16:44:05 +00:00
|
|
|
TunEndpoint::MarkIPActiveForever(huint128_t ip)
|
2018-09-10 11:08:09 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
m_IPActivity[ip] = std::numeric_limits<llarp_time_t>::max();
|
2018-09-10 11:08:09 +00:00
|
|
|
}
|
|
|
|
|
2019-07-30 23:42:13 +00:00
|
|
|
TunEndpoint::~TunEndpoint() = default;
|
2018-08-15 15:36:34 +00:00
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|
} // namespace handlers
|
|
|
|
} // namespace llarp
|