Merge pull request #740 from majestrate/fix-testnet-crashes-2019-07-29

Fix testnet crashes 2019 07 29
Jeff 2019-07-29 19:16:20 -04:00 committed by GitHub
commit 3bf990cbad
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 44 additions and 37 deletions

View File

@@ -336,9 +336,10 @@ namespace llarp
       uint64_t txid = ++ids;
       TXOwner peer(askpeer, txid);
       TXOwner whoasked(OurKey(), txid);
+      RouterID K;
+      K.Randomize();
       pendingExploreLookups().NewTX(
-          peer, whoasked, askpeer.as_array(),
-          new ExploreNetworkJob(askpeer.as_array(), this));
+          peer, whoasked, K, new ExploreNetworkJob(askpeer.as_array(), this));
     }

     void
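
Note on the hunk above: the explore transaction now looks up a freshly randomized RouterID K instead of the asked peer's own key, so each exploration targets a random point in the DHT keyspace rather than the neighbourhood of the peer being asked. A minimal sketch of generating such a random lookup target; the names here are illustrative stand-ins, not the llarp API (llarp does this through RouterID::Randomize() on its own aligned-buffer type):

#include <array>
#include <cstdint>
#include <random>

// Hypothetical stand-in for RouterID: a 32-byte identifier filled with
// random bytes, usable as a DHT exploration target.
std::array<std::uint8_t, 32> RandomLookupTarget()
{
  std::array<std::uint8_t, 32> k{};
  std::random_device rd;
  std::uniform_int_distribution<int> byte(0, 255);
  for(auto& b : k)
    b = static_cast<std::uint8_t>(byte(rd));  // randomize every byte of the key
  return k;
}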

View File

@@ -9,7 +9,7 @@ namespace llarp
   {
     struct XorMetric
     {
-      const Key_t& us;
+      const Key_t us;

       XorMetric(const Key_t& ourKey) : us(ourKey)
       {
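
The change above makes XorMetric hold its reference key by value instead of as a const reference. If the metric is constructed from a temporary key, or from a key that goes out of scope before the metric is used (for example inside a sort), the reference member dangles; a by-value copy cannot. A sketch of the difference, using a hypothetical Key32 in place of llarp's Key_t:

#include <array>
#include <cstdint>

using Key32 = std::array<std::uint8_t, 32>;

struct XorMetricByRef
{
  const Key32& us;                   // dangles if bound to a temporary
  explicit XorMetricByRef(const Key32& k) : us(k) {}
};

struct XorMetricByValue
{
  const Key32 us;                    // owns its own copy of the key
  explicit XorMetricByValue(const Key32& k) : us(k) {}
};

Key32 MakeKey();                     // imagine this returns a temporary

// XorMetricByRef bad{MakeKey()};    // 'us' would dangle after this statement
// XorMetricByValue ok{MakeKey()};   // safe: the key is copied into the metric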

View File

@@ -13,6 +13,7 @@
 #include <routing/transfer_traffic_message.hpp>
 #include <util/buffer.hpp>
 #include <util/endian.hpp>
+#include <util/logic.hpp>

 #include <deque>
@@ -145,7 +146,8 @@ namespace llarp
       if((currentStatus & LR_StatusRecord::SUCCESS) == 1)
       {
         llarp::LogDebug("LR_Status message processed, path build successful");
-        HandlePathConfirmMessage(r);
+        auto self = shared_from_this();
+        r->logic()->queue_func([=]() { self->HandlePathConfirmMessage(r); });
       }
       else
       {
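
In the hunk above, the success handler is no longer run inline while the LR_Status message is being processed; it is queued onto the logic thread, with shared_from_this() captured so the object cannot be destroyed before the queued call runs. A sketch of that deferral pattern with stand-in types (EventQueue and Post are not the llarp API, which uses Logic and queue_func):

#include <functional>
#include <memory>
#include <queue>

struct EventQueue
{
  std::queue<std::function<void()>> jobs;
  void Post(std::function<void()> f) { jobs.push(std::move(f)); }
  void Drain()
  {
    while(!jobs.empty())
    {
      jobs.front()();
      jobs.pop();
    }
  }
};

struct PathHop : std::enable_shared_from_this<PathHop>
{
  void HandlePathConfirm() { /* bookkeeping runs on the logic thread */ }

  void OnStatusSuccess(EventQueue& logic)
  {
    // Keep *this alive until the queued job actually runs. Note that
    // shared_from_this() requires the object to already be owned by a
    // std::shared_ptr.
    auto self = shared_from_this();
    logic.Post([self]() { self->HandlePathConfirm(); });
  }
};

// Usage: auto hop = std::make_shared<PathHop>(); hop->OnStatusSuccess(logic);
// logic.Drain();  // HandlePathConfirm runs here, with hop kept alive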

View File

@@ -15,12 +15,14 @@
 namespace llarp
 {
   struct AsyncPathKeyExchangeContext
+      : std::enable_shared_from_this< AsyncPathKeyExchangeContext >
   {
     using Path_t = path::Path_ptr;
     using PathSet_t = path::PathSet_ptr;
     PathSet_t pathset = nullptr;
     Path_t path = nullptr;
-    using Handler = std::function< void(const AsyncPathKeyExchangeContext&) >;
+    using Handler =
+        std::function< void(std::shared_ptr< AsyncPathKeyExchangeContext >) >;

     Handler result;
     size_t idx = 0;
@@ -96,13 +98,13 @@ namespace llarp
       {
         // farthest hop
         // TODO: encrypt junk frames because our public keys are not eligator
-        logic->queue_func(std::bind(result, *this));
+        logic->queue_func(std::bind(result, shared_from_this()));
       }
       else
       {
         // next hop
-        worker->addJob(
-            std::bind(&AsyncPathKeyExchangeContext::GenerateNextKey, *this));
+        worker->addJob(std::bind(&AsyncPathKeyExchangeContext::GenerateNextKey,
+                                 shared_from_this()));
       }
     }
@@ -120,28 +122,28 @@ namespace llarp
       {
         LRCM.frames[i].Randomize();
       }
-      pool->addJob(
-          std::bind(&AsyncPathKeyExchangeContext::GenerateNextKey, *this));
+      pool->addJob(std::bind(&AsyncPathKeyExchangeContext::GenerateNextKey,
+                             shared_from_this()));
     }
   };

   static void
-  PathBuilderKeysGenerated(const AsyncPathKeyExchangeContext& ctx)
+  PathBuilderKeysGenerated(std::shared_ptr< AsyncPathKeyExchangeContext > ctx)
   {
-    if(!ctx.pathset->IsStopped())
+    if(!ctx->pathset->IsStopped())
     {
-      RouterID remote = ctx.path->Upstream();
-      const ILinkMessage* msg = &ctx.LRCM;
-      if(ctx.router->SendToOrQueue(remote, msg))
+      const RouterID remote = ctx->path->Upstream();
+      const ILinkMessage* msg = &ctx->LRCM;
+      if(ctx->router->SendToOrQueue(remote, msg))
       {
         // persist session with router until this path is done
-        ctx.router->PersistSessionUntil(remote, ctx.path->ExpireTime());
+        ctx->router->PersistSessionUntil(remote, ctx->path->ExpireTime());
         // add own path
-        ctx.router->pathContext().AddOwnPath(ctx.pathset, ctx.path);
-        ctx.pathset->PathBuildStarted(ctx.path);
+        ctx->router->pathContext().AddOwnPath(ctx->pathset, ctx->path);
+        ctx->pathset->PathBuildStarted(ctx->path);
       }
       else
-        LogError(ctx.pathset->Name(), " failed to send LRCM to ", remote);
+        LogError(ctx->pathset->Name(), " failed to send LRCM to ", remote);
     }
   }
@@ -419,15 +421,15 @@ namespace llarp
       return;
     lastBuild = Now();
     // async generate keys
-    AsyncPathKeyExchangeContext ctx;
-    ctx.router = router;
-    ctx.pathset = GetSelf();
+    auto ctx = std::make_shared< AsyncPathKeyExchangeContext >();
+    ctx->router = router;
+    ctx->pathset = GetSelf();
     auto path = std::make_shared< path::Path >(hops, this, roles);
     LogInfo(Name(), " build ", path->HopsString());
     path->SetBuildResultHook(
         [this](Path_ptr p) { this->HandlePathBuilt(p); });
-    ctx.AsyncGenerateKeys(path, router->logic(), router->threadpool(),
-                          &PathBuilderKeysGenerated);
+    ctx->AsyncGenerateKeys(path, router->logic(), router->threadpool(),
+                           &PathBuilderKeysGenerated);
   }

   void
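
The four hunks above in this file replace the stack-allocated, copied-by-value AsyncPathKeyExchangeContext with a single heap-allocated context owned by std::shared_ptr: the struct derives from std::enable_shared_from_this, every std::bind that used to copy *this now binds shared_from_this(), the completion handler takes a shared_ptr, and the call site creates the context with std::make_shared. The worker and logic threads therefore all operate on one live object whose lifetime is tied to the outstanding jobs. A compilable sketch of the binding pattern, using hypothetical names (KeyExchange, pool) rather than the llarp types:

#include <cstddef>
#include <functional>
#include <memory>
#include <vector>

// Minimal multi-stage async job that re-queues itself. std::bind with
// shared_from_this() keeps one shared instance alive across all stages,
// whereas std::bind with *this would copy the object into every job.
struct KeyExchange : std::enable_shared_from_this<KeyExchange>
{
  std::size_t idx = 0;

  void NextStage(std::vector<std::function<void()>>& pool)
  {
    ++idx;  // state that later stages must observe
    if(idx < 3)
      pool.push_back(std::bind(&KeyExchange::NextStage, shared_from_this(),
                               std::ref(pool)));
  }
};

inline void RunStages()
{
  std::vector<std::function<void()>> pool;
  auto ctx = std::make_shared<KeyExchange>();  // shared ownership up front
  ctx->NextStage(pool);
  while(!pool.empty())
  {
    auto job = pool.back();
    pool.pop_back();
    job();  // every job sees and mutates the same ctx->idx
  }
}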

View File

@@ -893,20 +893,21 @@ namespace llarp
   Endpoint::OnLookup(const Address& addr, const IntroSet* introset,
                      const RouterID& endpoint)
   {
-    auto now = Now();
+    const auto now = router->Now();
     auto& fails = m_state->m_ServiceLookupFails;
     auto& lookups = m_state->m_PendingServiceLookups;
     if(introset == nullptr || introset->IsExpired(now))
     {
       LogError(Name(), " failed to lookup ", addr.ToString(), " from ",
                endpoint);
       fails[endpoint] = fails[endpoint] + 1;
-      // inform one
-      auto itr = lookups.find(addr);
-      if(itr != lookups.end())
+      // inform all
+      auto range = lookups.equal_range(addr);
+      auto itr = range.first;
+      if(itr != range.second)
       {
         itr->second(addr, nullptr);
-        lookups.erase(itr);
+        itr = lookups.erase(itr);
       }
       return false;
     }
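
The OnLookup failure path above now looks the address up with lookups.equal_range(addr) and erases through the iterator returned by erase(), the usual pattern when the pending-lookup table is multimap-like and can hold several waiters for the same address. The sketch below drains the whole range and assumes std::unordered_multimap; the actual container type in llarp's endpoint state may differ:

#include <functional>
#include <string>
#include <unordered_map>

using Addr = std::string;
using LookupHandler = std::function<void(const Addr&, bool found)>;
using PendingLookups = std::unordered_multimap<Addr, LookupHandler>;

// Invoke and remove every handler waiting on `addr`.
inline void InformAll(PendingLookups& lookups, const Addr& addr, bool found)
{
  auto range = lookups.equal_range(addr);
  auto itr = range.first;
  while(itr != range.second)
  {
    itr->second(addr, found);   // notify the waiter
    itr = lookups.erase(itr);   // erase returns the next valid iterator
  }
}
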
@@ -1169,12 +1170,12 @@ namespace llarp
           if(c)
           {
             c->UpdateIntroSet(true);
-            for(auto& pending : traffic[r])
+            for(auto& pending : m_state->m_PendingTraffic[r])
             {
               c->AsyncEncryptAndSendTo(pending.Buffer(), pending.protocol);
             }
           }
-          traffic.erase(r);
+          m_state->m_PendingTraffic.erase(r);
         },
         5000, true);
   }
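
In the hunk above, the delayed callback now reaches the pending-traffic table through m_state every time it runs instead of through a previously captured alias. This appears to be defensive capture hygiene: a callback that fires 5000 ms later should re-read the owning state at run time rather than rely on whatever alias was in scope when it was scheduled. A small sketch of that discipline for deferred callbacks, with Timer and State as stand-ins rather than llarp types:

#include <functional>
#include <memory>
#include <unordered_map>
#include <vector>

struct State
{
  std::unordered_map<int, std::vector<int>> pendingTraffic;
};

struct Timer
{
  std::function<void()> pending;
  void CallLater(std::function<void()> f) { pending = std::move(f); }
  void Fire() { if(pending) pending(); }
};

void FlushLater(Timer& timer, const std::shared_ptr<State>& state, int r)
{
  // Capture the owning shared_ptr (cheap, keeps State alive) and look the
  // entry up again when the callback actually runs.
  timer.CallLater([state, r]() {
    auto itr = state->pendingTraffic.find(r);
    if(itr != state->pendingTraffic.end())
    {
      // ... send itr->second ...
      state->pendingTraffic.erase(itr);
    }
  });
}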

View File

@@ -52,24 +52,25 @@ namespace llarp
       do
       {
         auto& msg = sendq.front();
-        while(msg.vecs.size() && sz >= msg.vecs.front().iov_len)
+        while(msg.vecs.size() > 0 && sz >= msg.vecs.front().iov_len)
         {
           sz -= msg.vecs.front().iov_len;
           msg.vecs.pop_front();
           msg.fragments.pop_front();
         }
         if(msg.vecs.size() == 0)
         {
           msg.Delivered();
           sendq.pop_front();
         }
-        else
+        else if(sz)
         {
           auto& front = msg.vecs.front();
           front.iov_len -= sz;
           front.iov_base = ((byte_t*)front.iov_base) + sz;
           return;
         }
+        else
+          return;
       } while(sendq.size());
     }
   }
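
The last hunk tightens the loop that consumes `sz` bytes of completed writes from a queue of iovec fragments: the `else if(sz)` guard ensures the front fragment is only shrunk when leftover bytes actually remain, and the added `else return;` stops the scan otherwise. A self-contained sketch of the same bookkeeping with simplified types (Frag and OutboundMessage are stand-ins for the iovec-based send queue, without the Delivered() callback):

#include <cstddef>
#include <cstdint>
#include <deque>

struct Frag
{
  const std::uint8_t* base;
  std::size_t len;
};

struct OutboundMessage
{
  std::deque<Frag> vecs;
};

inline void ConsumeWritten(std::deque<OutboundMessage>& sendq, std::size_t sz)
{
  while(!sendq.empty())
  {
    auto& msg = sendq.front();
    // Drop every fragment that was written in full.
    while(!msg.vecs.empty() && sz >= msg.vecs.front().len)
    {
      sz -= msg.vecs.front().len;
      msg.vecs.pop_front();
    }
    if(msg.vecs.empty())
    {
      sendq.pop_front();      // whole message delivered, move to the next one
    }
    else if(sz)
    {
      auto& front = msg.vecs.front();
      front.len -= sz;        // shrink the partially written fragment
      front.base += sz;
      return;
    }
    else
      return;                 // nothing left to consume
  }
}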