#include <algorithm>
#include <variant>

// harmless on other platforms
#define __USE_MINGW_ANSI_STDIO 1
#include "tun.hpp"
#include <sys/types.h>
#ifndef _WIN32
#include <sys/socket.h>
#include <netdb.h>
#endif

#include <llarp/dns/dns.hpp>
#include <llarp/ev/ev.hpp>
#include <llarp/net/net.hpp>
#include <llarp/router/abstractrouter.hpp>
#include <llarp/service/context.hpp>
#include <llarp/service/outbound_context.hpp>
#include <llarp/service/endpoint_state.hpp>
#include <llarp/service/name.hpp>
#include <llarp/service/protocol_type.hpp>
#include <llarp/util/meta/memfn.hpp>
#include <llarp/nodedb.hpp>
#include <llarp/quic/tunnel.hpp>
#include <llarp/rpc/endpoint_rpc.hpp>
#include <llarp/util/str.hpp>
#include <llarp/util/endian.hpp>
#include <llarp/dns/srv_data.hpp>

namespace llarp
{
  namespace handlers
  {
    // Intercepts DNS IP packets going to an IP on the tun interface; this is currently used on
    // Android where binding to a DNS port (i.e. via llarp::dns::Proxy) isn't possible because of OS
    // restrictions, but a tun interface *is* available.
    class DnsInterceptor : public dns::PacketHandler
    {
     public:
      TunEndpoint* const m_Endpoint;

      explicit DnsInterceptor(AbstractRouter* router, TunEndpoint* ep)
          : dns::PacketHandler{router->loop(), ep}, m_Endpoint{ep} {};

      void
      SendServerMessageBufferTo(
          const SockAddr& to, const SockAddr& from, llarp_buffer_t buf) override
      {
        const auto pkt = net::IPPacket::UDP(
            from.getIPv4(),
            ToNet(huint16_t{from.getPort()}),
            to.getIPv4(),
            ToNet(huint16_t{to.getPort()}),
            buf);

        if (pkt.sz == 0)
          return;
        m_Endpoint->HandleWriteIPPacket(
            pkt.ConstBuffer(), net::ExpandV4(from.asIPv4()), net::ExpandV4(to.asIPv4()), 0);
      }
    };

    TunEndpoint::TunEndpoint(AbstractRouter* r, service::Context* parent)
        : service::Endpoint(r, parent)
        , m_UserToNetworkPktQueue("endpoint_sendq", r->loop(), r->loop())
    {
      m_PacketRouter = std::make_unique<vpn::PacketRouter>(
          [this](net::IPPacket pkt) { HandleGotUserPacket(std::move(pkt)); });
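      // On Android we cannot bind a regular DNS socket, so DNS queries are pulled
      // straight off the tun interface via the DnsInterceptor above; every other
      // platform runs a normal dns::Proxy bound to the configured resolver address.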
#ifdef ANDROID
      m_Resolver = std::make_shared<DnsInterceptor>(r, this);
      m_PacketRouter->AddUDPHandler(huint16_t{53}, [&](net::IPPacket pkt) {
        const size_t ip_header_size = (pkt.Header()->ihl * 4);

        const uint8_t* ptr = pkt.buf + ip_header_size;
        const auto dst = ToNet(pkt.dstv4());
        const auto src = ToNet(pkt.srcv4());
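        // the UDP header sits right after the IP header: bytes 0-1 carry the source
        // port and bytes 2-3 the destination port (both already in network byte
        // order), and the DNS payload begins after the fixed-size UDP header.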
        const SockAddr laddr{src, nuint16_t{*reinterpret_cast<const uint16_t*>(ptr)}};
        const SockAddr raddr{dst, nuint16_t{*reinterpret_cast<const uint16_t*>(ptr + 2)}};

        OwnedBuffer buf{pkt.sz - (udp_header_size + ip_header_size)};
        std::copy_n(ptr + udp_header_size, buf.sz, buf.buf.get());
        if (m_Resolver->ShouldHandlePacket(laddr, raddr, buf))
          m_Resolver->HandlePacket(laddr, raddr, buf);
        else
          HandleGotUserPacket(std::move(pkt));
      });
#else
      m_Resolver = std::make_shared<dns::Proxy>(r->loop(), this);
#endif
    }

    util::StatusObject
    TunEndpoint::ExtractStatus() const
    {
      auto obj = service::Endpoint::ExtractStatus();
      obj["ifaddr"] = m_OurRange.ToString();
      obj["ifname"] = m_IfName;
      std::vector<std::string> resolvers;
      for (const auto& addr : m_UpstreamResolvers)
        resolvers.emplace_back(addr.toString());
obj["ustreamResolvers"] = resolvers;
|
2020-05-06 20:38:44 +00:00
|
|
|
obj["localResolver"] = m_LocalResolverAddr.toString();
|
2019-02-11 17:14:43 +00:00
|
|
|
util::StatusObject ips{};
|
2020-04-07 18:38:56 +00:00
|
|
|
for (const auto& item : m_IPActivity)
|
2019-02-08 19:43:25 +00:00
|
|
|
{
|
2020-02-25 17:05:13 +00:00
|
|
|
util::StatusObject ipObj{{"lastActive", to_json(item.second)}};
|
2019-02-08 19:43:25 +00:00
|
|
|
std::string remoteStr;
|
2020-04-07 18:38:56 +00:00
|
|
|
AlignedBuffer<32> addr = m_IPToAddr.at(item.first);
|
|
|
|
if (m_SNodes.at(addr))
|
2019-02-08 19:43:25 +00:00
|
|
|
remoteStr = RouterID(addr.as_array()).ToString();
|
|
|
|
else
|
|
|
|
remoteStr = service::Address(addr.as_array()).ToString();
|
2020-04-07 18:38:56 +00:00
|
|
|
ipObj["remote"] = remoteStr;
|
2019-02-08 19:43:25 +00:00
|
|
|
std::string ipaddr = item.first.ToString();
|
2020-04-07 18:38:56 +00:00
|
|
|
ips[ipaddr] = ipObj;
|
2019-02-08 19:43:25 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
obj["addrs"] = ips;
|
|
|
|
obj["ourIP"] = m_OurIP.ToString();
|
2019-08-19 09:33:26 +00:00
|
|
|
obj["nextIP"] = m_NextIP.ToString();
|
2020-04-07 18:38:56 +00:00
|
|
|
obj["maxIP"] = m_MaxIP.ToString();
|
2019-02-11 17:14:43 +00:00
|
|
|
return obj;
|
2019-02-08 19:43:25 +00:00
|
|
|
}
|
|
|
|
|
2021-02-05 21:48:57 +00:00
|
|
|
void
|
|
|
|
TunEndpoint::Thaw()
|
|
|
|
{
|
|
|
|
if (m_Resolver)
|
|
|
|
m_Resolver->Restart();
|
|
|
|
}
|
|
|
|
|
2018-08-16 14:34:15 +00:00
|
|
|

    bool
    TunEndpoint::Configure(const NetworkConfig& conf, const DnsConfig& dnsConf)
    {
      if (conf.m_reachable)
      {
        m_PublishIntroSet = true;
        LogInfo(Name(), " setting to be reachable by default");
      }
      else
      {
        m_PublishIntroSet = false;
        LogInfo(Name(), " setting to be not reachable by default");
      }

      if (conf.m_AuthType != service::AuthType::eAuthTypeNone)
      {
        std::string url, method;
        if (conf.m_AuthUrl.has_value() and conf.m_AuthMethod.has_value())
        {
          url = *conf.m_AuthUrl;
          method = *conf.m_AuthMethod;
        }
        auto auth = std::make_shared<rpc::EndpointAuthRPC>(
            url, method, conf.m_AuthWhitelist, Router()->lmq(), shared_from_this());
        auth->Start();
        m_AuthPolicy = std::move(auth);
      }

      /*
       * TODO: reinstate this option (it's not even clear what section this came from...)
       *
      if (k == "isolate-network" && IsTrueValue(v.c_str()))
      {
#if defined(__linux__)
        LogInfo(Name(), " isolating network...");
        if (!SpawnIsolatedNetwork())
        {
          LogError(Name(), " failed to spawn isolated network");
          return false;
        }
        LogInfo(Name(), " booyeah network isolation succeeded");
        return true;
#else
        LogError(Name(), " network isolation is not supported on your platform");
        return false;
#endif
      }
      */

      /*
       * TODO: this is currently defined for [router] / RouterConfig, but is clearly an [endpoint]
       * option. either move it to [endpoint] or plumb RouterConfig through
       *
      if (k == "strict-connect")
      {
        RouterID connect;
        if (!connect.FromString(v))
        {
          LogError(Name(), " invalid snode for strict-connect: ", v);
          return false;
        }

        RouterContact rc;
        if (!m_router->nodedb()->Get(connect, rc))
        {
          LogError(
              Name(), " we don't have the RC for ", v, " so we can't use it in strict-connect");
          return false;
        }
        for (const auto& ai : rc.addrs)
        {
          m_StrictConnectAddrs.emplace_back(ai);
          LogInfo(Name(), " added ", m_StrictConnectAddrs.back(), " to strict connect");
        }
        return true;
      }
      */

      m_LocalResolverAddr = dnsConf.m_bind;
      m_UpstreamResolvers = dnsConf.m_upstreamDNS;

      m_BaseV6Address = conf.m_baseV6Address;

      for (const auto& item : conf.m_mapAddrs)
      {
        if (not MapAddress(item.second, item.first, false))
          return false;
      }

      m_IfName = conf.m_ifname;
      if (m_IfName.empty())
      {
        const auto maybe = llarp::FindFreeTun();
        if (not maybe.has_value())
          throw std::runtime_error("cannot find free interface name");
        m_IfName = *maybe;
      }

      m_OurRange = conf.m_ifaddr;
      if (!m_OurRange.addr.h)
      {
        const auto maybe = llarp::FindFreeRange();
        if (not maybe.has_value())
        {
          throw std::runtime_error("cannot find free address range");
        }
        m_OurRange = *maybe;
      }

      m_OurIP = m_OurRange.addr;
      m_UseV6 = false;
      return Endpoint::Configure(conf, dnsConf);
    }

    bool
    TunEndpoint::HasLocalIP(const huint128_t& ip) const
    {
      return m_IPToAddr.find(ip) != m_IPToAddr.end();
    }

    void
    TunEndpoint::Flush()
    {
      FlushSend();
      Pump(Now());
      // flush network to user
      while (not m_NetworkToUserPktQueue.empty())
      {
        m_NetIf->WritePacket(m_NetworkToUserPktQueue.top().pkt);
        m_NetworkToUserPktQueue.pop();
      }
    }

    static bool
    is_random_snode(const dns::Message& msg)
    {
      return msg.questions[0].IsName("random.snode");
    }

    static bool
    is_localhost_loki(const dns::Message& msg)
    {
      return msg.questions[0].IsLocalhost();
    }

    static dns::Message&
    clear_dns_message(dns::Message& msg)
    {
      msg.authorities.resize(0);
      msg.additional.resize(0);
      msg.answers.resize(0);
      msg.hdr_fields &= ~dns::flags_RCODENameError;
      return msg;
    }

    std::optional<std::variant<service::Address, RouterID>>
    TunEndpoint::ObtainAddrForIP(huint128_t ip) const
    {
      auto itr = m_IPToAddr.find(ip);
      if (itr == m_IPToAddr.end())
        return std::nullopt;
      if (m_SNodes.at(itr->second))
        return RouterID{itr->second.as_array()};
      else
        return service::Address{itr->second.as_array()};
    }

    bool
    TunEndpoint::HandleHookedDNSMessage(dns::Message msg, std::function<void(dns::Message)> reply)
    {
      auto ReplyToSNodeDNSWhenReady = [self = this, reply = reply](
                                          RouterID snode, auto msg, bool isV6) -> bool {
        return self->EnsurePathToSNode(
            snode,
            [=](const RouterID&, exit::BaseSession_ptr s, [[maybe_unused]] service::ConvoTag tag) {
              self->SendDNSReply(snode, s, msg, reply, isV6);
            });
      };
      auto ReplyToLokiDNSWhenReady = [self = this, reply = reply](
                                         service::Address addr, auto msg, bool isV6) -> bool {
        using service::Address;
        using service::OutboundContext;
        return self->EnsurePathToService(
            addr,
            [=](const Address&, OutboundContext* ctx) {
              self->SendDNSReply(addr, ctx, msg, reply, isV6);
            },
            2s);
      };

      auto ReplyToDNSWhenReady = [ReplyToLokiDNSWhenReady, ReplyToSNodeDNSWhenReady](
                                     auto addr, auto msg, bool isV6) {
        if (auto ptr = std::get_if<RouterID>(&addr))
        {
          ReplyToSNodeDNSWhenReady(*ptr, msg, isV6);
          return;
        }
        if (auto ptr = std::get_if<service::Address>(&addr))
        {
          ReplyToLokiDNSWhenReady(*ptr, msg, isV6);
          return;
        }
      };

      auto ReplyToLokiSRVWhenReady = [self = this, reply = reply](
                                         service::Address addr, auto msg) -> bool {
        using service::Address;
        using service::OutboundContext;

        return self->EnsurePathToService(
            addr,
            [=](const Address&, OutboundContext* ctx) {
              if (ctx == nullptr)
                return;

              const auto& introset = ctx->GetCurrentIntroSet();
              msg->AddSRVReply(introset.GetMatchingSRVRecords(addr.subdomain));
              reply(*msg);
            },
            2s);
      };

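      // Answers that CNAME into .loki/.snode arrive here from a hooked upstream reply;
      // strip the stale answer sections and re-resolve the CNAME target over lokinet
      // before handing the rewritten reply back to the client.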
      if (msg.answers.size() > 0)
      {
        const auto& answer = msg.answers[0];
        if (answer.HasCNameForTLD(".snode"))
        {
          dns::Name_t qname;
          llarp_buffer_t buf(answer.rData);
          if (not dns::DecodeName(&buf, qname, true))
            return false;
          RouterID addr;
          if (not addr.FromString(qname))
            return false;
          auto replyMsg = std::make_shared<dns::Message>(clear_dns_message(msg));
          return ReplyToSNodeDNSWhenReady(addr, std::move(replyMsg), false);
        }
        else if (answer.HasCNameForTLD(".loki"))
        {
          dns::Name_t qname;
          llarp_buffer_t buf(answer.rData);
          if (not dns::DecodeName(&buf, qname, true))
            return false;

          service::Address addr;
          if (not addr.FromString(qname))
            return false;

          auto replyMsg = std::make_shared<dns::Message>(clear_dns_message(msg));
          return ReplyToLokiDNSWhenReady(addr, replyMsg, false);
        }
      }
      if (msg.questions.size() != 1)
      {
        llarp::LogWarn("bad number of dns questions: ", msg.questions.size());
        return false;
      }
      std::string qname = msg.questions[0].Name();
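      // For a query like "name.loki" (or "sub.name.loki") keep only the label directly
      // before the .loki TLD; that is the candidate LNS name looked up further below.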
      const auto nameparts = split(qname, ".");
      std::string lnsName;
      if (nameparts.size() >= 2 and ends_with(qname, ".loki"))
      {
        lnsName = nameparts[nameparts.size() - 2];
        lnsName += ".loki"sv;
      }
      if (msg.questions[0].qtype == dns::qTypeTXT)
      {
        RouterID snode;
        if (snode.FromString(qname))
        {
          m_router->LookupRouter(snode, [reply, msg = std::move(msg)](const auto& found) mutable {
            if (found.empty())
            {
              msg.AddNXReply();
            }
            else
            {
              std::stringstream ss;
              for (const auto& rc : found)
                rc.ToTXTRecord(ss);
              msg.AddTXTReply(ss.str());
            }
            reply(msg);
          });
          return true;
        }
        else if (msg.questions[0].IsLocalhost() and msg.questions[0].HasSubdomains())
        {
          const auto subdomain = msg.questions[0].Subdomains();
          if (subdomain == "exit")
          {
            if (HasExit())
            {
              std::stringstream ss;
              m_ExitMap.ForEachEntry([&ss](const auto& range, const auto& exit) {
                ss << range.ToString() << "=" << exit.ToString() << "; ";
              });
              msg.AddTXTReply(ss.str());
            }
            else
            {
              msg.AddNXReply();
            }
          }
          else if (subdomain == "netid")
          {
            std::stringstream ss;
            ss << "netid=" << m_router->rc().netID.ToString() << ";";
            msg.AddTXTReply(ss.str());
          }
          else
          {
            msg.AddNXReply();
          }
        }
        else
        {
          msg.AddNXReply();
        }

        reply(msg);
      }
      else if (msg.questions[0].qtype == dns::qTypeMX)
      {
        // mx record
        service::Address addr;
        if (addr.FromString(qname, ".loki") || addr.FromString(qname, ".snode")
            || is_random_snode(msg) || is_localhost_loki(msg))
        {
          msg.AddMXReply(qname, 1);
        }
        else if (service::NameIsValid(lnsName))
        {
          LookupNameAsync(lnsName, [msg, lnsName, reply](auto maybe) mutable {
            if (maybe.has_value())
            {
              var::visit([&](auto&& value) { msg.AddMXReply(value.ToString(), 1); }, *maybe);
            }
            else
            {
              msg.AddNXReply();
            }
            reply(msg);
          });
          return true;
        }
        else
          msg.AddNXReply();
        reply(msg);
      }
      else if (msg.questions[0].qtype == dns::qTypeCNAME)
      {
        if (is_random_snode(msg))
        {
          RouterID random;
          if (Router()->GetRandomGoodRouter(random))
          {
            msg.AddCNAMEReply(random.ToString(), 1);
          }
          else
            msg.AddNXReply();
        }
        else if (msg.questions[0].IsLocalhost() and msg.questions[0].HasSubdomains())
        {
          const auto subdomain = msg.questions[0].Subdomains();
          if (subdomain == "exit" and HasExit())
          {
            m_ExitMap.ForEachEntry(
                [&msg](const auto&, const auto& exit) { msg.AddCNAMEReply(exit.ToString(), 1); });
          }
          else
          {
            msg.AddNXReply();
          }
        }
        else if (is_localhost_loki(msg))
        {
          size_t counter = 0;
          context->ForEachService(
              [&](const std::string&, const std::shared_ptr<service::Endpoint>& service) -> bool {
                const service::Address addr = service->GetIdentity().pub.Addr();
                msg.AddCNAMEReply(addr.ToString(), 1);
                ++counter;
                return true;
              });
          if (counter == 0)
            msg.AddNXReply();
        }
        else
          msg.AddNXReply();
        reply(msg);
      }
      else if (msg.questions[0].qtype == dns::qTypeA || msg.questions[0].qtype == dns::qTypeAAAA)
      {
        const bool isV6 = msg.questions[0].qtype == dns::qTypeAAAA;
        const bool isV4 = msg.questions[0].qtype == dns::qTypeA;
        llarp::service::Address addr;
        if (isV6 && !SupportsV6())
        {  // empty reply but not an NXDOMAIN so that the client can retry IPv4
          msg.AddNSReply("localhost.loki.");
        }
        // on MacOS this is a typeA query
        else if (is_random_snode(msg))
        {
          RouterID random;
          if (Router()->GetRandomGoodRouter(random))
          {
            msg.AddCNAMEReply(random.ToString(), 1);
            return ReplyToSNodeDNSWhenReady(random, std::make_shared<dns::Message>(msg), isV6);
          }
          else
            msg.AddNXReply();
        }
        else if (is_localhost_loki(msg))
        {
          const bool lookingForExit = msg.questions[0].Subdomains() == "exit";
          huint128_t ip = GetIfAddr();
          if (ip.h)
          {
            if (lookingForExit)
            {
              if (HasExit())
              {
                m_ExitMap.ForEachEntry(
                    [&msg](const auto&, const auto& exit) { msg.AddCNAMEReply(exit.ToString()); });
                msg.AddINReply(ip, isV6);
              }
              else
              {
                msg.AddNXReply();
              }
            }
            else
            {
              msg.AddCNAMEReply(m_Identity.pub.Name(), 1);
              msg.AddINReply(ip, isV6);
            }
          }
          else
          {
            msg.AddNXReply();
          }
        }
else if (addr.FromString(qname, ".loki"))
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (isV4 && SupportsV6())
|
2018-12-07 20:56:01 +00:00
|
|
|
{
|
2019-06-11 18:23:53 +00:00
|
|
|
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
|
2019-06-11 16:44:05 +00:00
|
|
|
}
|
2018-12-07 20:56:01 +00:00
|
|
|
else
|
2019-01-07 22:15:31 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
return ReplyToLokiDNSWhenReady(addr, std::make_shared<dns::Message>(msg), isV6);
|
2019-01-07 22:15:31 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
else if (addr.FromString(qname, ".snode"))
|
2018-12-03 22:22:59 +00:00
|
|
|
{
|
2020-04-07 18:38:56 +00:00
|
|
|
if (isV4 && SupportsV6())
|
2019-06-12 13:48:14 +00:00
|
|
|
{
|
|
|
|
msg.hdr_fields |= dns::flags_QR | dns::flags_AA | dns::flags_RA;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2020-03-12 12:19:37 +00:00
|
|
|
return ReplyToSNodeDNSWhenReady(
|
2020-04-07 18:38:56 +00:00
|
|
|
addr.as_array(), std::make_shared<dns::Message>(msg), isV6);
|
2019-06-12 13:48:14 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-09-19 14:38:57 +00:00
|
|
|
else if (service::NameIsValid(lnsName))
|
2020-09-17 19:18:08 +00:00
|
|
|
{
|
2021-03-19 19:30:09 +00:00
|
|
|
LookupNameAsync(
|
2020-09-19 14:38:57 +00:00
|
|
|
lnsName,
|
2020-09-19 13:29:36 +00:00
|
|
|
[msg = std::make_shared<dns::Message>(msg),
|
|
|
|
name = Name(),
|
2020-09-19 14:38:57 +00:00
|
|
|
lnsName,
|
2020-09-19 13:29:36 +00:00
|
|
|
isV6,
|
|
|
|
reply,
|
2021-03-19 20:06:03 +00:00
|
|
|
ReplyToDNSWhenReady](auto maybe) {
|
2020-09-17 19:18:08 +00:00
|
|
|
if (not maybe.has_value())
|
|
|
|
{
|
2020-09-19 14:38:57 +00:00
|
|
|
LogWarn(name, " lns name ", lnsName, " not resolved");
|
2020-09-17 19:18:08 +00:00
|
|
|
msg->AddNXReply();
|
|
|
|
reply(*msg);
|
|
|
|
return;
|
|
|
|
}
|
2021-03-19 20:06:03 +00:00
|
|
|
ReplyToDNSWhenReady(*maybe, msg, isV6);
|
2020-09-17 19:18:08 +00:00
|
|
|
});
|
2021-03-19 19:30:09 +00:00
|
|
|
return true;
|
2020-09-17 19:18:08 +00:00
|
|
|
}
|
2018-12-03 22:22:59 +00:00
|
|
|
else
|
2018-12-04 16:16:43 +00:00
|
|
|
msg.AddNXReply();
|
|
|
|
|
|
|
|
reply(msg);
|
2018-12-03 22:22:59 +00:00
|
|
|
}
|
2020-04-07 18:38:56 +00:00
|
|
|
      else if (msg.questions[0].qtype == dns::qTypePTR)
      {
        // reverse dns
        huint128_t ip = {0};
        if (!dns::DecodePTR(msg.questions[0].qname, ip))
        {
          msg.AddNXReply();
          reply(msg);
          return true;
        }

        if (auto maybe = ObtainAddrForIP(ip))
        {
          std::visit([&msg](auto&& result) { msg.AddAReply(result.ToString()); }, *maybe);
          reply(msg);
          return true;
        }
        msg.AddNXReply();
        reply(msg);
        return true;
      }
      else if (msg.questions[0].qtype == dns::qTypeSRV)
      {
        llarp::service::Address addr;

        if (is_localhost_loki(msg))
        {
          msg.AddSRVReply(introSet().GetMatchingSRVRecords(msg.questions[0].Subdomains()));
          reply(msg);
          return true;
        }
        else if (addr.FromString(qname, ".loki"))
        {
          llarp::LogDebug("SRV request for: ", qname);

          return ReplyToLokiSRVWhenReady(addr, std::make_shared<dns::Message>(msg));
        }
      }
      else
      {
        msg.AddNXReply();
        reply(msg);
      }
      return true;
    }

    void
    TunEndpoint::ResetInternalState()
    {
      service::Endpoint::ResetInternalState();
    }

    bool
    TunEndpoint::SupportsV6() const
    {
      return m_UseV6;
    }

    // FIXME: pass in which question it should be addressing
    bool
    TunEndpoint::ShouldHookDNSMessage(const dns::Message& msg) const
    {
      llarp::service::Address addr;
      if (msg.questions.size() == 1)
      {
        /// hook every .loki
        if (msg.questions[0].HasTLD(".loki"))
          return true;
        /// hook every .snode
        if (msg.questions[0].HasTLD(".snode"))
          return true;
        // hook any ranges we own
        if (msg.questions[0].qtype == llarp::dns::qTypePTR)
        {
          huint128_t ip = {0};
          if (!dns::DecodePTR(msg.questions[0].qname, ip))
            return false;
          return m_OurRange.Contains(ip);
        }
      }
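      // also hook replies whose CNAME answers point back into .loki/.snode so that
      // HandleHookedDNSMessage above can rewrite them.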
      for (const auto& answer : msg.answers)
      {
        if (answer.HasCNameForTLD(".loki"))
          return true;
        if (answer.HasCNameForTLD(".snode"))
          return true;
      }
      return false;
    }

    bool
    TunEndpoint::MapAddress(const service::Address& addr, huint128_t ip, bool SNode)
    {
      auto itr = m_IPToAddr.find(ip);
      if (itr != m_IPToAddr.end())
      {
        llarp::LogWarn(
            ip, " already mapped to ", service::Address(itr->second.as_array()).ToString());
        return false;
      }
      llarp::LogInfo(Name() + " map ", addr.ToString(), " to ", ip);

      m_IPToAddr[ip] = addr;
      m_AddrToIP[addr] = ip;
      m_SNodes[addr] = SNode;
      MarkIPActiveForever(ip);
      return true;
    }

    std::string
    TunEndpoint::GetIfName() const
    {
#ifdef _WIN32
      return net::TruncateV6(GetIfAddr()).ToString();
#else
      return m_IfName;
#endif
    }

    bool
    TunEndpoint::Start()
    {
      if (!Endpoint::Start())
      {
        llarp::LogWarn("Couldn't start endpoint");
        return false;
      }
      return SetupNetworking();
    }

    bool
    TunEndpoint::IsSNode() const
    {
      // TODO : implement me
      return false;
    }

    bool
    TunEndpoint::SetupTun()
    {
      m_NextIP = m_OurIP;
      m_MaxIP = m_OurRange.HighestAddr();
      llarp::LogInfo(Name(), " set ", m_IfName, " to have address ", m_OurIP);
      llarp::LogInfo(Name(), " allocated up to ", m_MaxIP, " on range ", m_OurRange);

      const service::Address ourAddr = m_Identity.pub.Addr();

      if (not MapAddress(ourAddr, GetIfAddr(), false))
      {
        return false;
      }

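      // describe the interface we want (name, owned v4 range, optional v6 range and
      // the local dns address) and ask the platform vpn layer to create it for us.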
      vpn::InterfaceInfo info;
      info.addrs.emplace(m_OurRange);

      if (m_BaseV6Address)
      {
        IPRange v6range = m_OurRange;
        v6range.addr = (*m_BaseV6Address) | m_OurRange.addr;
        LogInfo(Name(), " using v6 range: ", v6range);
        info.addrs.emplace(v6range, AF_INET6);
      }

      info.ifname = m_IfName;
      info.dnsaddr.FromString(m_LocalResolverAddr.toHost());

      LogInfo(Name(), " setting up network...");

      try
      {
        m_NetIf = Router()->GetVPNPlatform()->ObtainInterface(std::move(info));
      }
      catch (std::exception& ex)
      {
        LogError(Name(), " failed to set up network interface: ", ex.what());
      }
      if (not m_NetIf)
      {
        LogError(Name(), " failed to obtain network interface");
        return false;
      }
      m_IfName = m_NetIf->IfName();
      LogInfo(Name(), " got network interface ", m_IfName);

      if (not Router()->loop()->add_network_interface(m_NetIf, [this](net::IPPacket pkt) {
            m_PacketRouter->HandleIPPacket(std::move(pkt));
          }))
      {
        LogError(Name(), " failed to add network interface");
        return false;
      }

      const auto maybe = GetInterfaceIPv6Address(m_IfName);
      if (maybe.has_value())
      {
        m_OurIPv6 = *maybe;
        LogInfo(Name(), " has ipv6 address ", m_OurIPv6);
      }

      Router()->loop()->add_ticker([this] { Flush(); });

      if (m_OnUp)
      {
        m_OnUp->NotifyAsync(NotifyParams());
      }
      return HasAddress(ourAddr);
    }

    std::unordered_map<std::string, std::string>
    TunEndpoint::NotifyParams() const
    {
      auto env = Endpoint::NotifyParams();
      env.emplace("IP_ADDR", m_OurIP.ToString());
      env.emplace("IF_ADDR", m_OurRange.ToString());
      env.emplace("IF_NAME", m_IfName);
      std::string strictConnect;
      for (const auto& addr : m_StrictConnectAddrs)
        strictConnect += addr.toString() + " ";
      env.emplace("STRICT_CONNECT_ADDRS", strictConnect);
      return env;
    }

    bool
    TunEndpoint::SetupNetworking()
    {
      llarp::LogInfo("Set Up networking for ", Name());
      if (!SetupTun())
      {
        llarp::LogError(Name(), " failed to set up network interface");
        return false;
      }
      if (!m_Resolver->Start(m_LocalResolverAddr.createSockAddr(), m_UpstreamResolvers))
      {
        llarp::LogError(Name(), " failed to start DNS server");
        return false;
      }
      return true;
    }

    void
    TunEndpoint::Tick(llarp_time_t now)
    {
      Endpoint::Tick(now);
    }

    bool
    TunEndpoint::Stop()
    {
      if (m_Resolver)
        m_Resolver->Stop();
      return llarp::service::Endpoint::Stop();
    }

    void
    TunEndpoint::FlushSend()
    {
      m_UserToNetworkPktQueue.Process([&](net::IPPacket& pkt) {
        huint128_t dst, src;
        if (pkt.IsV4())
        {
          dst = pkt.dst4to6();
          src = pkt.src4to6();
        }
        else
        {
          dst = pkt.dstv6();
          src = pkt.srcv6();
        }
        // this is for ipv6 slaac on ipv6 exits
        /*
        constexpr huint128_t ipv6_multicast_all_nodes =
            huint128_t{uint128_t{0xff01'0000'0000'0000UL, 1UL}};
        constexpr huint128_t ipv6_multicast_all_routers =
            huint128_t{uint128_t{0xff01'0000'0000'0000UL, 2UL}};
        if (dst == ipv6_multicast_all_nodes and m_state->m_ExitEnabled)
        {
          // send ipv6 multicast
          for (const auto& [ip, addr] : m_IPToAddr)
          {
            (void)ip;
            SendToOrQueue(
                service::Address{addr.as_array()}, pkt.ConstBuffer(), service::ProtocolType::Exit);
          }
          return;
        }
        */
        if (m_state->m_ExitEnabled)
        {
          dst = net::ExpandV4(net::TruncateV6(dst));
        }
        auto itr = m_IPToAddr.find(dst);
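        // nothing on this endpoint maps to the destination: either hand the packet to a
        // configured exit (after scrubbing the source address) or answer with ICMP unreachable.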
        if (itr == m_IPToAddr.end())
        {
          const auto exits = m_ExitMap.FindAll(dst);
          if (IsBogon(dst) or exits.empty())
          {
            // send icmp unreachable
            const auto icmp = pkt.MakeICMPUnreachable();
            if (icmp.has_value())
            {
              HandleWriteIPPacket(icmp->ConstBuffer(), dst, src, 0);
            }
          }
          else
          {
            const auto addr = *exits.begin();
            pkt.ZeroSourceAddress();
            MarkAddressOutbound(addr);
            EnsurePathToService(
                addr,
                [addr, pkt, self = this](service::Address, service::OutboundContext* ctx) {
                  if (ctx)
                  {
                    ctx->sendTimeout = 5s;
                  }
                  self->SendToOrQueue(addr, pkt.ConstBuffer(), service::ProtocolType::Exit);
                },
                1s);
          }
          return;
        }
        bool rewriteAddrs = true;
        std::variant<service::Address, RouterID> to;
        service::ProtocolType type;
        if (m_SNodes.at(itr->second))
        {
          to = RouterID{itr->second.as_array()};
          type = service::ProtocolType::TrafficV4;
        }
        else
        {
          to = service::Address{itr->second.as_array()};
          type = m_state->m_ExitEnabled and src != m_OurIP ? service::ProtocolType::Exit
                                                           : pkt.ServiceProtocol();
        }

        // prepare packet for insertion into network
        // this includes clearing IP addresses, recalculating checksums, etc
        if (rewriteAddrs)
        {
          if (pkt.IsV4())
            pkt.UpdateIPv4Address({0}, {0});
          else
            pkt.UpdateIPv6Address({0}, {0});
        }
        if (SendToOrQueue(to, pkt.Buffer(), type))
        {
          MarkIPActive(dst);
          return;
        }
        llarp::LogWarn(Name(), " did not flush packets");
      });
    }

    bool
    TunEndpoint::HandleInboundPacket(
        const service::ConvoTag tag,
        const llarp_buffer_t& buf,
        service::ProtocolType t,
        uint64_t seqno)
    {
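      // QUIC traffic is multiplexed over lokinet with a small framing header in front of
      // each datagram ([1, src "port", ECN] for client->server and [2, dst "port", ECN]
      // for server->client), so a packet shorter than that header cannot be valid; the
      // TunnelManager does the actual demux once we hand the payload over below.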
      if (t == service::ProtocolType::QUIC)
      {
        auto* quic = GetQUICTunnel();
        if (!quic)
        {
          LogWarn("incoming quic packet but this endpoint is not quic capable; dropping");
          return false;
        }
        if (buf.sz < 4)
        {
          LogWarn("invalid incoming quic packet, dropping");
          return false;
        }
        LogInfo("tag active T=", tag);
        MarkConvoTagActive(tag);
quic->receive_packet(tag, buf);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-03-08 20:48:11 +00:00
|
|
|
if (t != service::ProtocolType::TrafficV4 && t != service::ProtocolType::TrafficV6
|
|
|
|
&& t != service::ProtocolType::Exit)
|
2020-05-21 14:18:23 +00:00
|
|
|
return false;
|
      std::variant<service::Address, RouterID> addr;
      if (auto maybe = GetEndpointWithConvoTag(tag))
      {
        addr = *maybe;
      }
      else
        return false;

      huint128_t src, dst;

      net::IPPacket pkt;
      if (not pkt.Load(buf))
        return false;

      if (m_state->m_ExitEnabled)
      {
        // exit side from exit
        src = ObtainIPForAddr(addr);
        if (t == service::ProtocolType::Exit)
        {
          if (pkt.IsV4())
            dst = pkt.dst4to6();
          else if (pkt.IsV6())
          {
            dst = pkt.dstv6();
            src = net::ExpandV4Lan(net::TruncateV6(src));
          }
        }
        else
        {
          // non exit traffic on exit
          dst = m_OurIP;
        }
      }
      else if (t == service::ProtocolType::Exit)
      {
        // client side exit traffic from exit
        if (pkt.IsV4())
        {
          dst = m_OurIP;
          src = pkt.src4to6();
        }
        else if (pkt.IsV6())
        {
          dst = m_OurIPv6;
          src = pkt.srcv6();
        }
        // find what exit we think this should be for
        const auto mapped = m_ExitMap.FindAll(src);
        if (IsBogon(src))
          return false;

        if (const auto ptr = std::get_if<service::Address>(&addr))
        {
          if (mapped.count(*ptr) == 0)
          {
            // we got exit traffic from someone who we should not have gotten it from
            return false;
          }
        }
      }
      else
      {
        // snapp traffic
        src = ObtainIPForAddr(addr);
        dst = m_OurIP;
      }
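      // hand the packet to the write path with the tunnel-local src/dst we derived above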
      HandleWriteIPPacket(buf, src, dst, seqno);
      return true;
    }

    bool
    TunEndpoint::HandleWriteIPPacket(
        const llarp_buffer_t& b, huint128_t src, huint128_t dst, uint64_t seqno)
    {
      ManagedBuffer buf(b);
      WritePacket write;
      write.seqno = seqno;
      auto& pkt = write.pkt;
      // load
      if (!pkt.Load(buf))
      {
        return false;
      }
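      // rewrite the packet's addresses to the mapped tunnel src/dst before queueing it toward the user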
      if (pkt.IsV4())
      {
        pkt.UpdateIPv4Address(xhtonl(net::TruncateV6(src)), xhtonl(net::TruncateV6(dst)));
      }
      else if (pkt.IsV6())
      {
        pkt.UpdateIPv6Address(src, dst);
      }
      m_NetworkToUserPktQueue.push(std::move(write));
      return true;
    }

    huint128_t
    TunEndpoint::GetIfAddr() const
    {
      return m_OurIP;
    }

    huint128_t
    TunEndpoint::ObtainIPForAddr(std::variant<service::Address, RouterID> addr)
    {
      llarp_time_t now = Now();
      huint128_t nextIP = {0};
      AlignedBuffer<32> ident{};
      bool snode = false;
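      // flatten whichever address variant we were given into the 32-byte identity key used by the mapping tables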
      std::visit([&ident](auto&& val) { ident = val.data(); }, addr);

      if (std::get_if<RouterID>(&addr))
      {
        snode = true;
      }

      {
        // previously allocated address
        auto itr = m_AddrToIP.find(ident);
        if (itr != m_AddrToIP.end())
        {
          // mark ip active
          MarkIPActive(itr->second);
          return itr->second;
        }
      }
      // allocate new address
      if (m_NextIP < m_MaxIP)
      {
        do
        {
          nextIP = ++m_NextIP;
        } while (m_IPToAddr.find(nextIP) != m_IPToAddr.end() && m_NextIP < m_MaxIP);
        if (nextIP < m_MaxIP)
        {
          m_AddrToIP[ident] = nextIP;
          m_IPToAddr[nextIP] = ident;
          m_SNodes[ident] = snode;
          llarp::LogInfo(Name(), " mapped ", ident, " to ", nextIP);
          MarkIPActive(nextIP);
          return nextIP;
        }
      }

      // we are full
      // expire least active ip
      // TODO: prevent DoS
      std::pair<huint128_t, llarp_time_t> oldest = {huint128_t{0}, 0s};

      // find oldest entry
      auto itr = m_IPActivity.begin();
      while (itr != m_IPActivity.end())
      {
        if (itr->second <= now)
        {
          if ((now - itr->second) > oldest.second)
          {
            oldest.first = itr->first;
            oldest.second = itr->second;
          }
        }
        ++itr;
      }
      // remap address
      m_IPToAddr[oldest.first] = ident;
      m_AddrToIP[ident] = oldest.first;
      m_SNodes[ident] = snode;
      nextIP = oldest.first;

      // mark ip active
      m_IPActivity[nextIP] = std::max(m_IPActivity[nextIP], now);

      return nextIP;
    }

    bool
    TunEndpoint::HasRemoteForIP(huint128_t ip) const
    {
      return m_IPToAddr.find(ip) != m_IPToAddr.end();
    }

    void
    TunEndpoint::MarkIPActive(huint128_t ip)
    {
      llarp::LogDebug(Name(), " address ", ip, " is active");
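      // never move the activity timestamp backwards; MarkIPActiveForever may already have pinned it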
      m_IPActivity[ip] = std::max(Now(), m_IPActivity[ip]);
    }

    void
    TunEndpoint::MarkIPActiveForever(huint128_t ip)
    {
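      // a maximal timestamp means the eviction scan in ObtainIPForAddr will never reclaim this address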
      m_IPActivity[ip] = std::numeric_limits<llarp_time_t>::max();
    }

    void
    TunEndpoint::HandleGotUserPacket(net::IPPacket pkt)
    {
      m_UserToNetworkPktQueue.Emplace(std::move(pkt));
    }

    TunEndpoint::~TunEndpoint() = default;

  }  // namespace handlers
}  // namespace llarp