#include "endpoint.hpp"
#include "client.hpp"
#include "server.hpp"
#include "uvw/async.h"

#include <llarp/crypto/crypto.hpp>
#include <llarp/util/logging/buffer.hpp>
#include <llarp/service/endpoint.hpp>
#include <llarp/ev/ev_libuv.hpp>

#include <iostream>
#include <random>
#include <variant>

#include <uvw/timer.h>
#include <oxenmq/variant.h>

extern "C"
{
#include <sodium/crypto_generichash.h>
#include <sodium/randombytes.h>
}
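
// Endpoint implements the QUIC endpoint functionality shared by the lokinet quic client and
// server code (client.hpp / server.hpp): it owns the connection table (primary connections
// plus alias connection IDs), runs the periodic timeout/draining sweep, and moves packets
// between ngtcp2 and the owning lokinet service endpoint.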
namespace llarp::quic
{
  Endpoint::Endpoint(EndpointBase& ep) : service_endpoint{ep}
  {
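    // static_secret is random per run; it is only used (via make_stateless_reset_token) to
    // derive deterministic stateless reset tokens from connection IDs.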
    randombytes_buf(static_secret.data(), static_secret.size());

    // Set up a callback every 250ms to clean up stale sockets, etc.
    expiry_timer = get_loop()->resource<uvw::TimerHandle>();
    expiry_timer->on<uvw::TimerEvent>([this](const auto&, auto&) { check_timeouts(); });
    expiry_timer->start(250ms, 250ms);

    LogDebug("Created QUIC endpoint");
  }

  Endpoint::~Endpoint()
  {
    if (expiry_timer)
      expiry_timer->close();
  }

  std::shared_ptr<uvw::Loop>
  Endpoint::get_loop()
  {
    auto loop = service_endpoint.Loop()->MaybeGetUVWLoop();
    assert(loop);  // This object should never have been constructed if we aren't using uvw
    return loop;
  }

  void
  Endpoint::receive_packet(const SockAddr& src, uint8_t ecn, bstring_view data)
  {
    // ngtcp2 wants a local address but we don't necessarily have something so just set it to
    // IPv4 or IPv6 "unspecified" address (0.0.0.0 or ::)
    SockAddr local = src.isIPv6() ? SockAddr{in6addr_any} : SockAddr{nuint32_t{INADDR_ANY}};

    Packet pkt{Path{local, src}, data, ngtcp2_pkt_info{.ecn = ecn}};

    LogTrace("[", pkt.path, ",ecn=", pkt.info.ecn, "]: received ", data.size(), " bytes");

    handle_packet(pkt);

    LogTrace("Done handling packet");
  }

  void
  Endpoint::handle_packet(const Packet& p)
  {
    LogTrace("Handling incoming quic packet: ", buffer_printer{p.data});
    auto maybe_dcid = handle_packet_init(p);
    if (!maybe_dcid)
      return;
    auto& dcid = *maybe_dcid;

    // See if we have an existing connection already established for it
    LogTrace("Incoming connection id ", dcid);
    auto [connptr, alias] = get_conn(dcid);
    if (!connptr)
    {
      if (alias)
      {
        LogDebug("Incoming packet QUIC CID is an expired alias; dropping");
        return;
      }
      connptr = accept_initial_connection(p);
      if (!connptr)
        return;
    }

    if (alias)
      LogTrace("CID is alias for primary CID ", connptr->base_cid);
    else
      LogTrace("CID is primary CID");

    handle_conn_packet(*connptr, p);
  }

  std::optional<ConnectionID>
  Endpoint::handle_packet_init(const Packet& p)
  {
    version_info vi;
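    // Parse just enough of the packet header to extract the QUIC version and the
    // destination/source connection IDs; this does not require decrypting the packet.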
    auto rv = ngtcp2_pkt_decode_version_cid(
        &vi.version,
        &vi.dcid,
        &vi.dcid_len,
        &vi.scid,
        &vi.scid_len,
        u8data(p.data),
        p.data.size(),
        NGTCP2_MAX_CIDLEN);
    if (rv == 1)
    {  // 1 means Version Negotiation should be sent; otherwise the packet should be ignored
      send_version_negotiation(vi, p.path.remote);
      return std::nullopt;
    }
    if (rv != 0)
    {
      LogWarn("QUIC packet header decode failed: ", ngtcp2_strerror(rv));
      return std::nullopt;
    }

    if (vi.dcid_len > ConnectionID::max_size())
    {
      LogWarn("Internal error: destination ID is longer than should be allowed");
      return std::nullopt;
    }

    return std::make_optional<ConnectionID>(vi.dcid, vi.dcid_len);
  }

  void
  Endpoint::handle_conn_packet(Connection& conn, const Packet& p)
  {
    if (ngtcp2_conn_is_in_closing_period(conn))
    {
      LogDebug("Connection is in closing period, dropping");
      close_connection(conn);
      return;
    }
    if (conn.draining)
    {
      LogDebug("Connection is draining, dropping");
      // "draining" state means we received a connection close and we're keeping the
      // connection alive just to catch (and discard) straggling packets that arrive
      // out of order w.r.t. the connection close.
      return;
    }

    if (auto result = read_packet(p, conn); !result)
    {
      LogWarn("Read packet failed! ", ngtcp2_strerror(result.error_code));
    }

    // FIXME - reset idle timer?
    LogTrace("Done with incoming packet");
  }

  io_result
  Endpoint::read_packet(const Packet& p, Connection& conn)
  {
    LogTrace("Reading packet from ", p.path);
    auto rv =
        ngtcp2_conn_read_pkt(conn, p.path, &p.info, u8data(p.data), p.data.size(), get_timestamp());

    if (rv == 0)
      conn.io_ready();
    else
      LogWarn("read pkt error: ", ngtcp2_strerror(rv));

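    // Error codes we handle specially: NGTCP2_ERR_DRAINING means the peer sent a connection
    // close, so we enter the draining period rather than tearing the connection down
    // immediately; NGTCP2_ERR_DROP_CONN means ngtcp2 wants the connection dropped outright.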
    if (rv == NGTCP2_ERR_DRAINING)
      start_draining(conn);
    else if (rv == NGTCP2_ERR_DROP_CONN)
      delete_conn(conn.base_cid);

    return {rv};
  }

  io_result
  Endpoint::send_packet(const Address& to, bstring_view data, uint8_t ecn)
  {
    assert(service_endpoint.Loop()->inEventLoop());

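    // write_packet_header() prepends the 4-byte lokinet quic tunnel header (a direction flag
    // byte, the 16-bit tunnel "port" identifying the quic client/server instance, and the ECN
    // byte) so that the receiving side can demultiplex the packet to the right tunnel.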
    size_t header_size = write_packet_header(to.port(), ecn);
    size_t outgoing_len = header_size + data.size();
    assert(outgoing_len <= buf_.size());
    std::memcpy(&buf_[header_size], data.data(), data.size());
    bstring_view outgoing{buf_.data(), outgoing_len};

    if (service_endpoint.SendToOrQueue(to, outgoing, service::ProtocolType::QUIC))
    {
      LogTrace("[", to, "]: sent ", buffer_printer{outgoing});
    }
    else
    {
      LogDebug("Failed to send to quic endpoint ", to, "; was sending ", outgoing.size(), "B");
    }
    return {};
  }

  void
  Endpoint::send_version_negotiation(const version_info& vi, const Address& source)
  {
    std::array<std::byte, Endpoint::max_pkt_size_v4> buf;
    std::array<uint32_t, NGTCP2_PROTO_VER_MAX - NGTCP2_PROTO_VER_MIN + 2> versions;
    std::iota(versions.begin() + 1, versions.end(), NGTCP2_PROTO_VER_MIN);
    // we're supposed to send some 0x?a?a?a?a version to trigger version negotiation
    versions[0] = 0x1a2a3a4au;

    CSRNG rng{};
    auto nwrote = ngtcp2_pkt_write_version_negotiation(
        u8data(buf),
        buf.size(),
        std::uniform_int_distribution<uint8_t>{0, 255}(rng),
        vi.dcid,
        vi.dcid_len,
        vi.scid,
        vi.scid_len,
        versions.data(),
        versions.size());
    if (nwrote < 0)
      LogWarn("Failed to construct version negotiation packet: ", ngtcp2_strerror(nwrote));
    if (nwrote <= 0)
      return;

    send_packet(source, bstring_view{buf.data(), static_cast<size_t>(nwrote)}, 0);
  }

  void
  Endpoint::close_connection(Connection& conn, uint64_t code, bool application)
  {
    LogDebug("Closing connection ", conn.base_cid);
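    // Build the CONNECTION_CLOSE (or application close) packet once and cache it in
    // conn.conn_buffer: while the connection is in its closing period we simply retransmit
    // this same packet in response to anything else that arrives for it.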
    if (!conn.closing)
    {
      conn.conn_buffer.resize(max_pkt_size_v4);
      Path path;
      ngtcp2_pkt_info pi;

      auto write_close_func = application ? ngtcp2_conn_write_application_close_versioned
                                          : ngtcp2_conn_write_connection_close_versioned;

      auto written = write_close_func(
          conn,
          path,
          NGTCP2_PKT_INFO_VERSION,
          &pi,
          u8data(conn.conn_buffer),
          conn.conn_buffer.size(),
          code,
          nullptr,  // reason
          0,  // reason length
          get_timestamp());
      if (written <= 0)
      {
        LogWarn(
            "Failed to write connection close packet: ",
            written < 0 ? ngtcp2_strerror(written) : "unknown error: closing is 0 bytes??");
        return;
      }
      assert(written <= (long)conn.conn_buffer.size());
      conn.conn_buffer.resize(written);
      conn.closing = true;

      conn.path = path;
    }
    assert(conn.closing && !conn.conn_buffer.empty());

    if (auto sent = send_packet(conn.path.remote, conn.conn_buffer, 0); not sent)
    {
      LogWarn(
          "Failed to send packet: ",
          strerror(sent.error_code),
          "; removing connection ",
          conn.base_cid);
      delete_conn(conn.base_cid);
      return;
    }
  }

  /// Puts a connection into draining mode (i.e. after getting a connection close).  This will
  /// keep the connection registered for the recommended 3*Probe Timeout, during which we drop
  /// packets that use the connection id and after which we will forget about it.
  void
  Endpoint::start_draining(Connection& conn)
  {
    if (conn.draining)
      return;
    LogDebug("Putting ", conn.base_cid, " into draining mode");
    conn.draining = true;
    // Recommended draining time is 3*Probe Timeout
    draining.emplace(conn.base_cid, get_time() + ngtcp2_conn_get_pto(conn) * 3 * 1ns);
  }

  void
  Endpoint::check_timeouts()
  {
    auto now = get_time();
    uint64_t now_ts = get_timestamp(now);

    // Destroy any connections that are finished draining
    bool cleanup = false;
    while (!draining.empty() && draining.front().second < now)
    {
      if (auto it = conns.find(draining.front().first); it != conns.end())
      {
        if (std::holds_alternative<primary_conn_ptr>(it->second))
          cleanup = true;
        LogDebug("Deleting connection ", it->first);
        conns.erase(it);
      }
      draining.pop();
    }
    if (cleanup)
      clean_alias_conns();

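    // Idle-timeout sweep: any primary connection whose ngtcp2 idle expiry has already passed
    // (and which isn't already draining) gets moved into draining mode.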
    for (auto it = conns.begin(); it != conns.end(); ++it)
    {
      if (auto* conn_ptr = std::get_if<primary_conn_ptr>(&it->second))
      {
        Connection& conn = **conn_ptr;
        auto exp = ngtcp2_conn_get_idle_expiry(conn);
        if (exp >= now_ts || conn.draining)
          continue;
        start_draining(conn);
      }
    }
  }

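  // The conns map stores either a primary_conn_ptr (a shared_ptr owning the Connection, keyed
  // on its base CID) or an alias_conn_ptr (a weak_ptr for an extra CID created via
  // add_connection_id).  The bool in the returned pair indicates whether the looked-up CID
  // was such an alias.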
  std::pair<std::shared_ptr<Connection>, bool>
  Endpoint::get_conn(const ConnectionID& cid)
  {
    if (auto it = conns.find(cid); it != conns.end())
    {
      if (auto* wptr = std::get_if<alias_conn_ptr>(&it->second))
        return {wptr->lock(), true};
      return {var::get<primary_conn_ptr>(it->second), false};
    }
    return {nullptr, false};
  }

  bool
  Endpoint::delete_conn(const ConnectionID& cid)
  {
    auto it = conns.find(cid);
    if (it == conns.end())
    {
      LogDebug("Cannot delete connection ", cid, ": cid not found");
      return false;
    }

    bool primary = std::holds_alternative<primary_conn_ptr>(it->second);
    LogDebug("Deleting ", primary ? "primary" : "alias", " connection ", cid);
    conns.erase(it);
    if (primary)
      clean_alias_conns();
    return true;
  }

  void
  Endpoint::clean_alias_conns()
  {
    for (auto it = conns.begin(); it != conns.end();)
    {
      if (auto* conn_wptr = std::get_if<alias_conn_ptr>(&it->second);
          conn_wptr && conn_wptr->expired())
        it = conns.erase(it);
      else
        ++it;
    }
  }

  ConnectionID
  Endpoint::add_connection_id(Connection& conn, size_t cid_length)
  {
    ConnectionID cid;
    for (bool inserted = false; !inserted;)
    {
      cid = ConnectionID::random(cid_length);
      inserted = conns.emplace(cid, conn.weak_from_this()).second;
    }
    LogDebug("Created cid ", cid, " alias for ", conn.base_cid);
    return cid;
  }

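  // Derives the stateless reset token for a connection ID by hashing the endpoint's per-run
  // static_secret together with the CID (BLAKE2b via libsodium's crypto_generichash), so the
  // token for a given CID can be recomputed without storing any per-connection state.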
  void
  Endpoint::make_stateless_reset_token(const ConnectionID& cid, unsigned char* dest)
  {
    crypto_generichash_state state;
    crypto_generichash_init(&state, nullptr, 0, NGTCP2_STATELESS_RESET_TOKENLEN);
    crypto_generichash_update(&state, u8data(static_secret), static_secret.size());
    crypto_generichash_update(&state, cid.data, cid.datalen);
    crypto_generichash_final(&state, dest, NGTCP2_STATELESS_RESET_TOKENLEN);
  }

}  // namespace llarp::quic