actually have rid in command registration

pull/2232/head
dr7ana 5 months ago
parent 5ab40a7a7f
commit 1364e4ce53

@@ -123,12 +123,9 @@ namespace llarp
   }

   void
-  LinkManager::register_commands(std::shared_ptr<oxen::quic::BTRequestStream>& s)
+  LinkManager::register_commands(std::shared_ptr<oxen::quic::BTRequestStream>& s, const RouterID& router_id)
   {
-    log::critical(logcat, "{} called", __PRETTY_FUNCTION__);
-
-    const RouterID& router_id{s->conn.remote_key()};
-
+    log::critical(logcat, "Registering commands (RID:{})", router_id);

     s->register_command("bfetch_rcs"s, [this](oxen::quic::message m) {
       _router.loop()->call(
@@ -249,7 +246,7 @@ namespace llarp
           error_code);
       s.conn.close_connection(error_code);
     });
-    register_commands(s);
+    // register_commands(s);

    return s;
  }
@@ -280,7 +277,7 @@ namespace llarp
     log::critical(logcat, "Queued BTStream to be opened ID:{}", control_stream->stream_id());
     assert(control_stream->stream_id() == 0);
-    // register_commands(control_stream);
+    register_commands(control_stream, rid);
     itr->second = std::make_shared<link::Connection>(ci.shared_from_this(), control_stream);
     log::critical(logcat, "Successfully configured inbound connection fom {}...", rid);
@@ -369,6 +366,9 @@ namespace llarp
     if (auto p_itr = pending_conn_msg_queue.find(rid); p_itr != pending_conn_msg_queue.end())
       pending_conn_msg_queue.erase(p_itr);

+    if (auto c_itr = ep.pending_conns.find(rid); c_itr != ep.pending_conns.end())
+      ep.pending_conns.erase(c_itr);
+
     if (auto m_itr = ep.active_conns.find(rid); m_itr != ep.active_conns.end())
       ep.active_conns.erase(m_itr);
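
The new hunk makes connection teardown scrub the closing peer from the endpoint's pending map as well as the active one. A self-contained sketch of the same C++17 if-initializer lookup-then-erase pattern, with plain std::unordered_map standing in for the endpoint's containers:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    using RouterID = std::string;  // stand-in for llarp::RouterID

    int main()
    {
        std::unordered_map<RouterID, int> pending_conns{{"abc", 1}};
        std::unordered_map<RouterID, int> active_conns{{"abc", 2}};
        const RouterID rid{"abc"};

        // Look up once, erase only when present; an absent RID is a no-op,
        // not an error.
        if (auto c_itr = pending_conns.find(rid); c_itr != pending_conns.end())
            pending_conns.erase(c_itr);
        if (auto m_itr = active_conns.find(rid); m_itr != active_conns.end())
            active_conns.erase(m_itr);

        std::cout << pending_conns.size() << ' ' << active_conns.size() << '\n';  // 0 0
    }

A bare pending_conns.erase(rid) would behave identically here; the iterator form simply matches the style of the surrounding code.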
@@ -714,7 +714,7 @@ namespace llarp
       oxenc::bt_dict_consumer btdc{m.body()};
       btdc.required("local");
       auto rc_dict = btdc.consume_dict_data();
-      log::critical(logcat, "incoming dict data: {}", oxenc::to_hex(rc_dict));
+      // log::critical(logcat, "incoming dict data: {}", oxenc::to_hex(rc_dict));
       remote = RemoteRC{rc_dict};
       quantity = btdc.require<size_t>("quantity");
     }
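
For context, this handler decodes its bencoded request body with oxenc's bt_dict_consumer: required() positions the consumer at a key and throws if it is absent, consume_dict_data() hands back the raw serialized sub-dict (fed to RemoteRC above), and require<T>() locates and decodes a value in one step. A hedged, self-contained sketch; the key names mirror the hunk, the payload values are made up:

    #include <oxenc/bt_serialize.h>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        // Build a request body shaped like the one the handler expects.
        std::string body = oxenc::bt_serialize(oxenc::bt_dict{
            {"local", oxenc::bt_dict{{"pubkey", "0123abcd"}}},
            {"quantity", 8},
        });

        oxenc::bt_dict_consumer btdc{body};
        btdc.required("local");                   // throws if "local" is missing
        auto rc_dict = btdc.consume_dict_data();  // raw serialized sub-dict
        auto quantity = btdc.require<std::size_t>("quantity");

        std::cout << "rc dict: " << rc_dict.size() << " bytes, quantity: "
                  << quantity << "\n";
    }

Bencoded dicts store keys in lexicographic order and the consumer only walks forward, so "local" must be consumed before "quantity", as the handler does.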

@@ -213,7 +213,7 @@ namespace llarp
     startup_endpoint();

     void
-    register_commands(std::shared_ptr<oxen::quic::BTRequestStream>& s);
+    register_commands(std::shared_ptr<oxen::quic::BTRequestStream>& s, const RouterID& rid);

    public:
     const link::Endpoint&
@@ -416,7 +416,7 @@ namespace llarp
       s.conn.close_connection(error_code);
     });
-    link_manager.register_commands(control_stream);
+    link_manager.register_commands(control_stream, rid);
     itr->second = std::make_shared<link::Connection>(conn_interface, control_stream);
     log::critical(logcat, "Connection to RID:{} added to pending connections...", rid);

@@ -776,7 +776,7 @@ namespace llarp
     if (is_service_node())
     {
-      log::critical(
+      log::info(
          logcat,
          "Local service node has {} client connections since last RC update ({} to expiry)",
          num_client_connections(),
@@ -911,11 +911,12 @@ namespace llarp
     // (client-only) periodically fetch updated RCs
     if (now_timepoint - last_rc_fetch > RC_UPDATE_INTERVAL)
     {
+      log::critical(logcat, "Time to fetch RCs!");
       node_db()->fetch_rcs();
     }

     // (client-only) periodically fetch updated RouterID list
-    if (now_timepoint - last_rid_fetch > ROUTERID_UPDATE_INTERVAL)
+    if (not is_snode and now_timepoint - last_rid_fetch > ROUTERID_UPDATE_INTERVAL)
     {
       node_db()->fetch_rids();
     }
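
Both fetch jobs gate on elapsed time against the constants changed in the router.hpp hunk below, and the RouterID fetch now also checks not is_snode, since it is a client-only job. A minimal sketch of that gating, assuming (this hunk does not show it) that the last-fetch timestamps are advanced after each fetch:

    #include <chrono>

    using namespace std::chrono_literals;
    using time_point = std::chrono::steady_clock::time_point;

    // Values from the router.hpp hunk below.
    constexpr auto RC_UPDATE_INTERVAL = 4min;
    constexpr auto ROUTERID_UPDATE_INTERVAL = 1h;

    void periodic_fetch(
        time_point now, time_point& last_rc_fetch, time_point& last_rid_fetch, bool is_snode)
    {
        // (client-only) periodically fetch updated RCs
        if (now - last_rc_fetch > RC_UPDATE_INTERVAL)
        {
            // node_db()->fetch_rcs();  // the real call in the hunk above
            last_rc_fetch = now;        // assumed; not shown in this hunk
        }

        // (client-only) periodically fetch updated RouterID list
        if (not is_snode and now - last_rid_fetch > ROUTERID_UPDATE_INTERVAL)
        {
            // node_db()->fetch_rids();
            last_rid_fetch = now;
        }
    }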

@@ -48,7 +48,7 @@ namespace llarp
   static constexpr size_t INTROSET_STORAGE_REDUNDANCY =
       (INTROSET_RELAY_REDUNDANCY * INTROSET_REQS_PER_RELAY);

-  static const std::chrono::seconds RC_UPDATE_INTERVAL = 5min;
+  static const std::chrono::seconds RC_UPDATE_INTERVAL = 4min;
   static const std::chrono::seconds ROUTERID_UPDATE_INTERVAL = 1h;

   struct Contacts;
@@ -105,7 +105,6 @@ namespace llarp
     std::shared_ptr<NodeDB> _node_db;
     llarp_time_t _started_at;
     const oxenmq::TaggedThreadID _disk_thread;
-    // oxen::quic::Network _net;  // DISCUSS: we don't use this anywhere..?

     llarp_time_t _last_stats_report = 0s;
     llarp_time_t _next_decomm_warning = time_now_ms() + 15s;

@@ -46,13 +46,6 @@ namespace llarp
       throw std::runtime_error{err};
     }

-    log::error(
-        log::Cat("FIXME"),
-        "ABOUT TO VERIFY THIS: {}, WITH SIG {}, SIGNED BY {}",
-        oxenc::to_hex(msg),
-        oxenc::to_hex(sig),
-        router_id().ToHex());
-
     if (not crypto::verify(router_id(), msg, sig))
       throw std::runtime_error{"Failed to verify RemoteRC signature"};
   });
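
The deleted block was FIXME-tagged logging that dumped the full message, signature, and signer key at error level on every verification; the verify-or-throw check is all that remains. A sketch of that check, using libsodium's detached Ed25519 verification as a stand-in for llarp's crypto::verify (the real function and key types differ):

    #include <sodium.h>
    #include <stdexcept>
    #include <string_view>

    // Throws unless `sig` is a valid Ed25519 signature over `msg` by `pubkey`.
    void verify_or_throw(
        const unsigned char* pubkey,  // 32-byte Ed25519 public key
        std::string_view msg,
        std::string_view sig)         // expected crypto_sign_BYTES (64) long
    {
        if (sig.size() != crypto_sign_BYTES
            or crypto_sign_verify_detached(
                   reinterpret_cast<const unsigned char*>(sig.data()),
                   reinterpret_cast<const unsigned char*>(msg.data()),
                   msg.size(),
                   pubkey)
                != 0)
            throw std::runtime_error{"Failed to verify RemoteRC signature"};
    }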
