Merge tag 'v0.6.3' into debian/sid

Jason Rhinelander 2020-01-16 01:25:29 -04:00
commit c4d30de8fa
24 changed files with 223 additions and 195 deletions

View File

@@ -37,7 +37,6 @@ def main():
exe = 'valgrind {}'.format(args.bin)
else:
exe = args.bin
basedir = os.path.abspath(args.dir)
for nodeid in range(args.svc):
@@ -108,7 +107,7 @@ def main():
fp = os.path.join(d, 'client.ini')
with open(fp, 'w') as f:
config.write(f)
for n in range(args.connect):
for n in [0]:
otherID = (n + nodeid) % args.svc
f.write("[bootstrap]\nadd-node={}\n".format(os.path.join(basedir,svcNodeName(otherID), 'rc.signed')))
with open(hiddenservice, 'w') as f:

View File

@@ -1,6 +1,6 @@
#include <libntrup/ntru.h>
#if __AVX2__
#ifdef __x86_64__
#include <cpuid.h>
#include <array>
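
Switching the guard from __AVX2__ to __x86_64__ pulls the cpuid header in on any x86_64 build, so AVX2 support can be decided at run time instead of being fixed at compile time. As a rough illustration of that kind of runtime check (a sketch only, assuming GCC or Clang on x86_64; have_avx2 is a hypothetical helper, not code from this commit):

#include <cstdio>

#ifdef __x86_64__
#include <cpuid.h>
#endif

// Hypothetical helper: returns true when the CPU reports AVX2 support.
// Sketch only; the real detection in libntrup may differ.
static bool
have_avx2()
{
#ifdef __x86_64__
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  // AVX2 is bit 5 of EBX for CPUID leaf 7, subleaf 0.
  if(__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    return (ebx & (1u << 5)) != 0;
#endif
  return false;
}

int
main()
{
  std::printf("AVX2 %s\n", have_avx2() ? "available" : "not available");
  return 0;
}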

View File

@@ -8,7 +8,7 @@
#define LLARP_VERSION_MAJ 0
#define LLARP_VERSION_MIN 6
#define LLARP_VERSION_PATCH 2
#define LLARP_VERSION_PATCH 3
#define LLARP_DEFAULT_NETID "lokinet"

View File

@@ -183,16 +183,6 @@ __ ___ ____ _ _ ___ _ _ ____
llarp::LogError("nodedb_dir is incorrect");
return 0;
}
// llarp::LogInfo("nodedb_dir [", nodedb_dir, "] configured!");
ssize_t loaded = nodedb->load_dir(nodedb_dir.c_str());
llarp::LogInfo("nodedb_dir loaded ", loaded, " RCs from [", nodedb_dir,
"]");
if(loaded < 0)
{
// shouldn't be possible
llarp::LogError("nodedb_dir directory doesn't exist");
return 0;
}
return 1;
}
@@ -211,7 +201,7 @@ __ ___ ____ _ _ ___ _ _ ____
router = std::make_unique< Router >(worker, mainloop, logic);
nodedb = std::make_unique< llarp_nodedb >(router->diskworker());
nodedb = std::make_unique< llarp_nodedb >(router->diskworker(), nodedb_dir);
if(!router->Configure(config.get(), nodedb.get()))
{

View File

@@ -20,7 +20,7 @@
#include <util/thread/logic.hpp>
#include <nodedb.hpp>
#include <profiling.hpp>
#include <router/i_rc_lookup_handler.hpp>
#include <vector>
namespace llarp
@@ -41,7 +41,7 @@ namespace llarp
void
StoreRC(const RouterContact rc) const override
{
GetRouter()->nodedb()->InsertAsync(rc);
GetRouter()->rcLookupHandler().CheckRC(rc);
}
/// on behalf of whoasked request introset for target from dht router with
@@ -598,7 +598,7 @@ namespace llarp
TXOwner peer(askpeer, ++ids);
_pendingIntrosetLookups.NewTX(
peer, asker, addr,
new ServiceAddressLookup(asker, addr, this, R, handler));
new ServiceAddressLookup(asker, addr, this, R, handler), (R * 2000));
}
void
@@ -611,7 +611,7 @@ namespace llarp
TXOwner peer(askpeer, ++ids);
_pendingIntrosetLookups.NewTX(
peer, asker, addr,
new ServiceAddressLookup(asker, addr, this, 0, handler));
new ServiceAddressLookup(asker, addr, this, 0, handler), 1000);
}
void
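
The timeout for each pending introset lookup is now supplied per request instead of being fixed by the transaction holder: the recursive variant above budgets R * 2000 ms, while the non-recursive one uses a flat 1000 ms. A minimal sketch of that scaling rule, under the assumption that R counts recursion steps (LookupTimeout is an illustrative helper, not part of this codebase):

#include <cstdint>

using llarp_time_t = std::uint64_t;  // milliseconds (assumed)

// Illustrative helper: derive a lookup timeout from the recursion depth R,
// where R == 0 means a single non-recursive query.
constexpr llarp_time_t
LookupTimeout(std::uint64_t R)
{
  return R == 0 ? 1000 : R * 2000;
}

static_assert(LookupTimeout(0) == 1000, "flat 1s timeout for direct lookups");
static_assert(LookupTimeout(5) == 10000, "2s of budget per recursion step");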

View File

@@ -35,7 +35,7 @@ llarp_dht_allow_transit(llarp_dht_context *ctx)
void
llarp_dht_context_start(struct llarp_dht_context *ctx, const byte_t *key)
{
ctx->impl->Init(llarp::dht::Key_t(key), ctx->parent, 20000);
ctx->impl->Init(llarp::dht::Key_t(key), ctx->parent, 2000);
}
void

View File

@@ -43,7 +43,15 @@ namespace llarp
found = rc;
}
valuesFound.clear();
valuesFound.emplace_back(found);
if(not found.pubkey.IsZero())
{
valuesFound.resize(1);
valuesFound[0] = found;
}
else
{
llarp::LogWarn("We found a null RC for dht request, dropping it");
}
}
routing::DHTMessage msg;
msg.M.emplace_back(new GotRouterMessage(parent->OurKey(), whoasked.txid,

View File

@@ -167,7 +167,12 @@ namespace llarp
return false;
}
RouterContact found;
const Key_t k{K};
if(K.IsZero())
{
llarp::LogError("invalid FRM from ", From, "K is zero");
return false;
}
const Key_t k(K);
if(exploritory)
return dht.HandleExploritoryRouterLookup(From, txid, K, replies);
if(!dht.GetRouter()->ConnectionToRouterAllowed(K))

View File

@@ -4,6 +4,7 @@
#include <memory>
#include <path/path_context.hpp>
#include <router/abstractrouter.hpp>
#include <router/i_rc_lookup_handler.hpp>
namespace llarp
{
@@ -112,12 +113,19 @@ namespace llarp
LogDebug("got ", R.size(), " results in GRM for lookup");
if(R.size() == 0)
dht.pendingRouterLookups().NotFound(owner, K);
else if(R[0].pubkey.IsZero())
return false;
else
dht.pendingRouterLookups().Found(owner, R[0].pubkey, R);
return true;
}
llarp::LogWarn("Unwarranted GRM from ", From, " txid=", txid);
return false;
// store if valid
for(const auto &rc : R)
{
if(not dht.GetRouter()->rcLookupHandler().CheckRC(rc))
return false;
}
return true;
}
} // namespace dht
} // namespace llarp

View File

@@ -18,8 +18,9 @@ namespace llarp
{
}
GotRouterMessage(const Key_t& from, uint64_t id,
std::vector< RouterContact > results, bool tunneled)
: IMessage(from), R(std::move(results)), txid(id), relayed(tunneled)
const std::vector< RouterContact >& results,
bool tunneled)
: IMessage(from), R(results), txid(id), relayed(tunneled)
{
}
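
GotRouterMessage now takes the result vector by const reference and copies it, rather than taking it by value and moving from it, so the caller's vector is left intact after the reply is constructed. A small sketch of the trade-off between the two signatures, using a toy Message type rather than the real class:

#include <string>
#include <utility>
#include <vector>

struct MessageByValue
{
  std::vector< std::string > R;

  // By-value + move: a caller passing an rvalue avoids a copy entirely,
  // but a caller passing an lvalue pays for a copy into the parameter
  // and its own vector may be consumed via std::move.
  explicit MessageByValue(std::vector< std::string > results)
      : R(std::move(results))
  {
  }
};

struct MessageByRef
{
  std::vector< std::string > R;

  // Const reference: always exactly one copy, and the caller's vector is
  // untouched, which matters when it is reused after construction.
  explicit MessageByRef(const std::vector< std::string >& results) : R(results)
  {
  }
};

int
main()
{
  std::vector< std::string > found{"rc1", "rc2"};
  MessageByRef reply(found);              // 'found' is still usable here
  MessageByValue moved(std::move(found)); // 'found' is consumed
  return reply.R.size() == 2 && moved.R.size() == 2 ? 0 : 1;
}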

View File

@@ -3,6 +3,10 @@
#include <dht/context.hpp>
#include <dht/messages/findrouter.hpp>
#include <dht/messages/gotrouter.hpp>
#include <router/abstractrouter.hpp>
#include <router/i_rc_lookup_handler.hpp>
#include <utility>
namespace llarp
@@ -42,6 +46,7 @@ namespace llarp
void
RecursiveRouterLookup::DoNextRequest(const Key_t &peer)
{
peersAsked.emplace(peer);
parent->LookupRouterRecursive(target, whoasked.node, whoasked.txid, peer,
resultHandler);
}
@@ -49,6 +54,7 @@ namespace llarp
void
RecursiveRouterLookup::Start(const TXOwner &peer)
{
peersAsked.emplace(peer.node);
parent->DHTSendTo(peer.node.as_array(),
new FindRouterMessage(peer.txid, target));
}
@@ -61,7 +67,8 @@ namespace llarp
RouterContact found;
for(const auto &rc : valuesFound)
{
if(found.OtherIsNewer(rc))
if(found.OtherIsNewer(rc)
&& parent->GetRouter()->rcLookupHandler().CheckRC(rc))
found = rc;
}
valuesFound.clear();
@@ -77,9 +84,6 @@ namespace llarp
whoasked.node.as_array(),
new GotRouterMessage({}, whoasked.txid, valuesFound, false), false);
}
// store this in our nodedb for caching
if(valuesFound.size() > 0)
parent->StoreRC(valuesFound[0]);
}
} // namespace dht
} // namespace llarp
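
Both Start and DoNextRequest now record the queried peer in peersAsked before sending, which keeps a recursive lookup from asking the same router twice as it walks the DHT. A rough sketch of that bookkeeping, with a plain integer standing in for dht::Key_t and illustrative names throughout:

#include <cstdint>
#include <set>
#include <vector>

using NodeID = std::uint64_t;  // stand-in for dht::Key_t

// Sketch of the "never ask the same peer twice" bookkeeping used by a
// recursive lookup; names are illustrative, not this codebase's API.
struct RecursiveLookupSketch
{
  NodeID target;
  std::set< NodeID > peersAsked;  // every peer we have already queried

  // Pick the first candidate we have not asked yet. Returns false when
  // all candidates were already queried, which ends the recursion.
  bool
  NextPeer(const std::vector< NodeID >& candidates, NodeID& out)
  {
    for(const auto& peer : candidates)
    {
      if(peersAsked.count(peer))
        continue;
      peersAsked.insert(peer);
      out = peer;
      return true;
    }
    return false;
  }
};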

View File

@@ -13,8 +13,7 @@ namespace llarp
{
namespace dht
{
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS = 30000UL >
template < typename K, typename V, typename K_Hash >
struct TXHolder
{
using TXPtr = std::unique_ptr< TX< K, V > >;
@@ -72,7 +71,7 @@ namespace llarp
void
NewTX(const TXOwner& askpeer, const TXOwner& whoasked, const K& k,
TX< K, V >* t);
TX< K, V >* t, llarp_time_t requestTimeoutMS = 15000);
/// mark tx as not fond
void
@@ -93,11 +92,9 @@ namespace llarp
Expire(llarp_time_t now);
};
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS >
template < typename K, typename V, typename K_Hash >
const TX< K, V >*
TXHolder< K, V, K_Hash, requestTimeoutMS >::GetPendingLookupFrom(
const TXOwner& owner) const
TXHolder< K, V, K_Hash >::GetPendingLookupFrom(const TXOwner& owner) const
{
auto itr = tx.find(owner);
if(itr == tx.end())
@@ -108,12 +105,12 @@ namespace llarp
return itr->second.get();
}
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS >
template < typename K, typename V, typename K_Hash >
void
TXHolder< K, V, K_Hash, requestTimeoutMS >::NewTX(const TXOwner& askpeer,
const TXOwner& whoasked,
const K& k, TX< K, V >* t)
TXHolder< K, V, K_Hash >::NewTX(const TXOwner& askpeer,
const TXOwner& whoasked, const K& k,
TX< K, V >* t,
llarp_time_t requestTimeoutMS)
{
(void)whoasked;
tx.emplace(askpeer, std::unique_ptr< TX< K, V > >(t));
@@ -131,30 +128,24 @@ namespace llarp
}
}
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS >
template < typename K, typename V, typename K_Hash >
void
TXHolder< K, V, K_Hash, requestTimeoutMS >::NotFound(
const TXOwner& from, const std::unique_ptr< Key_t >& next)
TXHolder< K, V, K_Hash >::NotFound(const TXOwner& from,
const std::unique_ptr< Key_t >& next)
{
auto txitr = tx.find(from);
if(txitr == tx.end())
{
return;
}
// ask for next peer
if(!txitr->second->AskNextPeer(from.node, next))
Inform(from, txitr->second->target, {}, true, true);
Inform(from, txitr->second->target, {}, true, true);
}
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS >
template < typename K, typename V, typename K_Hash >
void
TXHolder< K, V, K_Hash, requestTimeoutMS >::Inform(TXOwner from, K key,
std::vector< V > values,
bool sendreply,
bool removeTimeouts)
TXHolder< K, V, K_Hash >::Inform(TXOwner from, K key,
std::vector< V > values, bool sendreply,
bool removeTimeouts)
{
auto range = waiting.equal_range(key);
auto itr = range.first;
@@ -187,15 +178,14 @@ namespace llarp
}
}
template < typename K, typename V, typename K_Hash,
llarp_time_t requestTimeoutMS >
template < typename K, typename V, typename K_Hash >
void
TXHolder< K, V, K_Hash, requestTimeoutMS >::Expire(llarp_time_t now)
TXHolder< K, V, K_Hash >::Expire(llarp_time_t now)
{
auto itr = timeouts.begin();
while(itr != timeouts.end())
{
if(now > itr->second && now - itr->second >= requestTimeoutMS)
if(now >= itr->second)
{
Inform(TXOwner{}, itr->first, {}, true, false);
itr = timeouts.erase(itr);
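
With the timeout removed from the TXHolder template parameters, each NewTX call can pick its own timeout, and the holder stores an absolute deadline that Expire simply compares against the current time (now >= deadline). A compact sketch of that shape, assuming a simple string key; the real holder also tracks waiting owners and replies:

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

using llarp_time_t = std::uint64_t;  // milliseconds (assumed)

// Minimal pending-request table with a per-request deadline.
struct PendingRequests
{
  // request key -> absolute deadline (time of issue + its own timeout)
  std::unordered_map< std::string, llarp_time_t > deadlines;

  void
  NewTX(const std::string& key, llarp_time_t now,
        llarp_time_t requestTimeoutMS = 15000)
  {
    deadlines[key] = now + requestTimeoutMS;  // deadline chosen per request
  }

  // Drop every request whose deadline has passed, reporting it as timed out.
  void
  Expire(llarp_time_t now,
         const std::function< void(const std::string&) >& onTimeout)
  {
    auto itr = deadlines.begin();
    while(itr != deadlines.end())
    {
      if(now >= itr->second)
      {
        onTimeout(itr->first);
        itr = deadlines.erase(itr);
      }
      else
        ++itr;
    }
  }
};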

View File

@@ -149,44 +149,17 @@ llarp_nodedb::UpdateAsyncIfNewer(llarp::RouterContact rc,
return false;
}
/// insert and write to disk
/// insert
bool
llarp_nodedb::Insert(const llarp::RouterContact &rc)
{
std::array< byte_t, MAX_RC_SIZE > tmp;
llarp_buffer_t buf(tmp);
if(!rc.BEncode(&buf))
return false;
buf.sz = buf.cur - buf.base;
auto filepath = getRCFilePath(rc.pubkey);
llarp::LogDebug("saving RC.pubkey ", filepath);
auto optional_ofs = llarp::util::OpenFileStream< std::ofstream >(
filepath,
std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if(!optional_ofs)
return false;
auto &ofs = optional_ofs.value();
ofs.write((char *)buf.base, buf.sz);
ofs.flush();
ofs.close();
if(!ofs)
{
llarp::LogError("Failed to write: ", filepath);
return false;
}
llarp::LogDebug("saved RC.pubkey: ", filepath);
// save rc after writing to disk
{
llarp::util::Lock lock(&access);
auto itr = entries.find(rc.pubkey.as_array());
if(itr != entries.end())
entries.erase(itr);
entries.emplace(rc.pubkey.as_array(), rc);
LogDebug("Added or updated RC for ", llarp::RouterID(rc.pubkey),
" to nodedb. Current nodedb count is: ", entries.size());
}
llarp::util::Lock lock(&access);
auto itr = entries.find(rc.pubkey.as_array());
if(itr != entries.end())
entries.erase(itr);
entries.emplace(rc.pubkey.as_array(), rc);
LogDebug("Added or updated RC for ", llarp::RouterID(rc.pubkey),
" to nodedb. Current nodedb count is: ", entries.size());
return true;
}
@@ -212,6 +185,7 @@ llarp_nodedb::Load(const fs::path &path)
if(l > 0)
loaded += l;
}
m_NextSaveToDisk = llarp::time_now_ms() + m_SaveInterval;
return loaded;
}
@@ -241,10 +215,19 @@ llarp_nodedb::SaveAll()
}
}
bool
llarp_nodedb::ShouldSaveToDisk(llarp_time_t now) const
{
if(now == 0)
now = llarp::time_now_ms();
return m_NextSaveToDisk > 0 && m_NextSaveToDisk <= now;
}
void
llarp_nodedb::AsyncFlushToDisk()
{
disk->addJob(std::bind(&llarp_nodedb::SaveAll, this));
m_NextSaveToDisk = llarp::time_now_ms() + m_SaveInterval;
}
ssize_t
@@ -375,16 +358,17 @@ crypto_threadworker_verifyrc(void *user)
// if it's valid we need to set it
if(verify_request->valid && rc.IsPublicRouter())
{
llarp::LogDebug("RC is valid, saving to disk");
verify_request->diskworker->addJob(
std::bind(&disk_threadworker_setRC, verify_request));
}
else
{
// callback to logic thread
verify_request->logic->queue_job(
{verify_request, &logic_threadworker_callback});
if(verify_request->diskworker)
{
llarp::LogDebug("RC is valid, saving to disk");
verify_request->diskworker->addJob(
std::bind(&disk_threadworker_setRC, verify_request));
return;
}
}
// callback to logic thread
verify_request->logic->queue_job(
{verify_request, &logic_threadworker_callback});
}
void
@@ -394,6 +378,12 @@ nodedb_inform_load_rc(void *user)
job->hook(job);
}
void
llarp_nodedb_async_verify(struct llarp_async_verify_rc *job)
{
job->cryptoworker->addJob(std::bind(&crypto_threadworker_verifyrc, job));
}
void
nodedb_async_load_rc(void *user)
{
@@ -440,43 +430,12 @@ llarp_nodedb::ensure_dir(const char *dir)
return true;
}
void
llarp_nodedb::set_dir(const char *dir)
{
nodePath = dir;
}
ssize_t
llarp_nodedb::load_dir(const char *dir)
llarp_nodedb::LoadAll()
{
std::error_code ec;
if(!fs::exists(dir, ec))
{
return -1;
}
set_dir(dir);
return Load(dir);
return Load(nodePath.c_str());
}
/// maybe rename to verify_and_set
void
llarp_nodedb_async_verify(struct llarp_async_verify_rc *job)
{
// switch to crypto threadpool and continue with
// crypto_threadworker_verifyrc
job->cryptoworker->addJob(std::bind(&crypto_threadworker_verifyrc, job));
}
// disabled for now
/*
void
llarp_nodedb_async_load_rc(struct llarp_async_load_rc *job)
{
// call in the disk io thread so we don't bog down the others
llarp_threadpool_queue_job(job->diskworker, {job, &nodedb_async_load_rc});
}
*/
size_t
llarp_nodedb::num_loaded() const
{
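
Insert now only updates the in-memory map; writing to disk is batched, with ShouldSaveToDisk gating a flush that runs at most once per m_SaveInterval (five minutes) and AsyncFlushToDisk handing SaveAll to the disk worker. A minimal sketch of that schedule, with a plain synchronous SaveAll standing in for the thread-pool job:

#include <cstdint>
#include <cstdio>

using llarp_time_t = std::uint64_t;  // milliseconds (assumed)

// Sketch of the batched-save schedule; SaveAll() stands in for the job
// handed to the disk worker thread pool in the real code.
struct BatchedStore
{
  llarp_time_t m_NextSaveToDisk = 0;  // 0 until the first load or flush
  const llarp_time_t m_SaveInterval = 60 * 5 * 1000;  // five minutes

  bool
  ShouldSaveToDisk(llarp_time_t now) const
  {
    return m_NextSaveToDisk > 0 && m_NextSaveToDisk <= now;
  }

  void
  FlushToDisk(llarp_time_t now)
  {
    SaveAll();
    m_NextSaveToDisk = now + m_SaveInterval;  // re-arm the schedule
  }

  void
  SaveAll() const
  {
    std::printf("writing all cached RCs to disk\n");
  }
};

// Typical per-tick driver, mirroring the router's Tick():
//   if(store.ShouldSaveToDisk(now))
//     store.FlushToDisk(now);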

View File

@@ -40,8 +40,10 @@ struct llarp_nodedb_iter
struct llarp_nodedb
{
explicit llarp_nodedb(std::shared_ptr< llarp::thread::ThreadPool > diskworker)
: disk(std::move(diskworker))
explicit llarp_nodedb(std::shared_ptr< llarp::thread::ThreadPool > diskworker,
const std::string rootdir)
: disk(std::move(diskworker)), nodePath(rootdir)
{
}
@@ -52,6 +54,10 @@ struct llarp_nodedb
std::shared_ptr< llarp::thread::ThreadPool > disk;
mutable llarp::util::Mutex access; // protects entries
/// time for next save to disk event, 0 if never happened
llarp_time_t m_NextSaveToDisk = 0;
/// how often to save to disk
const llarp_time_t m_SaveInterval = 60 * 5 * 1000;
struct NetDBEntry
{
@@ -67,6 +73,10 @@ struct llarp_nodedb
NetDBMap_t entries GUARDED_BY(access);
fs::path nodePath;
/// return true if we should save our nodedb to disk
bool
ShouldSaveToDisk(llarp_time_t now = 0) const;
bool
Remove(const llarp::RouterID &pk) LOCKS_EXCLUDED(access);
@@ -87,12 +97,12 @@ struct llarp_nodedb
std::string
getRCFilePath(const llarp::RouterID &pubkey) const;
/// insert and write to disk
/// insert without writing to disk
bool
Insert(const llarp::RouterContact &rc) LOCKS_EXCLUDED(access);
/// unconditional insert and write to disk in background
/// updates the inserted time of the entry
/// invokes Insert() asynchronously with an optional completion
/// callback
void
InsertAsync(llarp::RouterContact rc,
std::shared_ptr< llarp::Logic > l = nullptr,
@@ -127,7 +137,8 @@ struct llarp_nodedb
set_dir(const char *dir);
ssize_t
load_dir(const char *dir);
LoadAll();
ssize_t
store_dir(const char *dir);

View File

@@ -39,12 +39,16 @@ namespace llarp
bool
PathContext::CheckPathLimitHitByIP(const llarp::Addr& ip)
{
#ifdef TESTNET
return false;
#else
llarp::Addr remote = ip;
// set port to zero
remote.port(0);
// try inserting remote address by ip into decaying hash set
// if it cannot insert it has hit a limit
return not m_PathLimits.Insert(remote);
#endif
}
std::shared_ptr< Logic >
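
CheckPathLimitHitByIP zeroes the port and then tries to insert the address into a decaying hash set; if the insert is refused because the address is already present, the per-IP path limit has been hit (and the whole check is compiled out under TESTNET). A rough sketch of such a set with a fixed, assumed one-minute decay window; the container actually backing m_PathLimits may differ:

#include <cstdint>
#include <string>
#include <unordered_map>

using llarp_time_t = std::uint64_t;  // milliseconds (assumed)

// Sketch of a decaying hash set: a key may be inserted once per window.
// Names and the one-minute window are illustrative.
struct DecayingSet
{
  llarp_time_t window = 60 * 1000;
  std::unordered_map< std::string, llarp_time_t > lastSeen;

  // Returns false when the key was already inserted inside the window,
  // i.e. the caller has hit the limit.
  bool
  Insert(const std::string& key, llarp_time_t now)
  {
    Decay(now);
    return lastSeen.emplace(key, now).second;
  }

  void
  Decay(llarp_time_t now)
  {
    for(auto itr = lastSeen.begin(); itr != lastSeen.end();)
    {
      if(now >= itr->second + window)
        itr = lastSeen.erase(itr);
      else
        ++itr;
    }
  }
};

// Usage mirroring CheckPathLimitHitByIP: zero the port first, then
//   return not limits.Insert(remoteWithoutPort, now);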

View File

@@ -37,7 +37,8 @@ namespace llarp
SetRouterWhitelist(const std::vector< RouterID > &routers) = 0;
virtual void
GetRC(const RouterID &router, RCRequestCallback callback) = 0;
GetRC(const RouterID &router, RCRequestCallback callback,
bool forceLookup = false) = 0;
virtual bool
RemoteIsAllowed(const RouterID &remote) const = 0;

View File

@@ -10,9 +10,11 @@
#include <util/thread/threading.hpp>
#include <nodedb.hpp>
#include <dht/context.hpp>
#include <router/abstractrouter.hpp>
#include <iterator>
#include <functional>
#include <random>
namespace llarp
{
@@ -47,21 +49,30 @@ namespace llarp
" routers");
}
bool
RCLookupHandler::HaveReceivedWhitelist()
{
util::Lock l(&_mutex);
return whitelistRouters.empty();
}
void
RCLookupHandler::GetRC(const RouterID &router, RCRequestCallback callback)
RCLookupHandler::GetRC(const RouterID &router, RCRequestCallback callback,
bool forceLookup)
{
RouterContact remoteRC;
if(_nodedb->Get(router, remoteRC))
if(not forceLookup)
{
if(callback)
if(_nodedb->Get(router, remoteRC))
{
callback(router, &remoteRC, RCRequestResult::Success);
if(callback)
{
callback(router, &remoteRC, RCRequestResult::Success);
}
FinalizeRequest(router, &remoteRC, RCRequestResult::Success);
return;
}
FinalizeRequest(router, &remoteRC, RCRequestResult::Success);
return;
}
bool shouldDoLookup = false;
{
@@ -212,7 +223,7 @@ namespace llarp
for(const auto &router : routersToLookUp)
{
GetRC(router, nullptr);
GetRC(router, nullptr, true);
}
_nodedb->RemoveStaleRCs(_bootstrapRouterIDList,
@@ -235,6 +246,35 @@ namespace llarp
LogError("we have no bootstrap nodes specified");
}
if(useWhitelist)
{
static constexpr size_t LookupPerTick = 25;
std::vector< RouterID > lookupRouters;
lookupRouters.reserve(LookupPerTick);
{
// if we are using a whitelist look up a few routers we don't have
util::Lock l(&_mutex);
for(const auto &r : whitelistRouters)
{
if(_nodedb->Has(r))
continue;
lookupRouters.emplace_back(r);
}
}
if(lookupRouters.size() > LookupPerTick)
{
static std::mt19937_64 rng{std::random_device{}()};
std::shuffle(lookupRouters.begin(), lookupRouters.end(), rng);
lookupRouters.resize(LookupPerTick);
}
for(const auto &r : lookupRouters)
GetRC(r, nullptr, true);
return;
}
// TODO: only explore via random subset
// explore via every connected peer
_linkManager->ForEachPeer([&](ILinkSession *s) {
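
Under a whitelist, each tick now looks up at most LookupPerTick (25) routers that are missing from the nodedb, chosen by shuffling the list of missing entries and truncating it. A small standalone sketch of that selection step, with strings standing in for RouterIDs:

#include <algorithm>
#include <cstddef>
#include <random>
#include <string>
#include <vector>

// Pick at most `perTick` items from `missing`, uniformly at random, by
// shuffling and truncating, the same shape as the whitelist lookup above.
std::vector< std::string >
PickLookupSubset(std::vector< std::string > missing, std::size_t perTick = 25)
{
  if(missing.size() > perTick)
  {
    static std::mt19937_64 rng{std::random_device{}()};
    std::shuffle(missing.begin(), missing.end(), rng);
    missing.resize(perTick);
  }
  return missing;
}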

View File

@@ -40,9 +40,12 @@ namespace llarp
SetRouterWhitelist(const std::vector< RouterID > &routers) override
LOCKS_EXCLUDED(_mutex);
bool
HaveReceivedWhitelist();
void
GetRC(const RouterID &router, RCRequestCallback callback) override
LOCKS_EXCLUDED(_mutex);
GetRC(const RouterID &router, RCRequestCallback callback,
bool forceLookup = false) override LOCKS_EXCLUDED(_mutex);
bool
RemoteIsAllowed(const RouterID &remote) const override

View File

@@ -679,7 +679,7 @@ namespace llarp
LogError("Failed to update our RC");
}
if(isSvcNode)
if(isSvcNode && _rcLookupHandler.HaveReceivedWhitelist())
{
// remove RCs for nodes that are no longer allowed by network policy
nodedb()->RemoveIf([&](const RouterContact &rc) -> bool {
@@ -696,18 +696,9 @@ namespace llarp
{
connected += _linkManager.NumberOfPendingConnections();
}
const size_t N = nodedb()->num_loaded();
if(N < llarp::path::default_len)
{
LogInfo("We need at least ", llarp::path::default_len,
" service nodes to build paths but we have ", N, " in nodedb");
_rcLookupHandler.ExploreNetwork();
}
else if(isSvcNode)
{
_rcLookupHandler.ExploreNetwork();
}
_rcLookupHandler.ExploreNetwork();
size_t connectToNum = _outboundSessionMaker.minConnectedRouters;
const auto strictConnect = _rcLookupHandler.NumberOfStrictConnectRouters();
if(strictConnect > 0 && connectToNum > strictConnect)
@@ -727,12 +718,17 @@ namespace llarp
if(rpcCaller)
rpcCaller->Tick(now);
// save profiles async
// save profiles
if(routerProfiling().ShouldSave(now))
{
diskworker()->addJob(
[&]() { routerProfiling().Save(routerProfilesFile.c_str()); });
}
// save nodedb
if(nodedb()->ShouldSaveToDisk(now))
{
nodedb()->AsyncFlushToDisk();
}
// get connected peers
std::set< dht::Key_t > peersWeHave;
@@ -1014,6 +1010,16 @@ namespace llarp
return false;
}
{
ssize_t loaded = _nodedb->LoadAll();
llarp::LogInfo("loaded ", loaded, " RCs");
if(loaded < 0)
{
// shouldn't be possible
return false;
}
}
llarp_dht_context_start(dht(), pubkey());
for(const auto &rc : bootstrapRCList)

View File

@@ -334,7 +334,7 @@ namespace llarp
}
if(!VerifySignature())
{
llarp::LogError("invalid signature");
llarp::LogError("invalid signature: ", *this);
return false;
}
return true;

View File

@@ -423,18 +423,18 @@ namespace llarp
bool
Start(const std::string& addr)
{
uint16_t port = 0;
auto idx = addr.find_first_of(':');
Addr netaddr;
if(idx != std::string::npos)
{
port = std::stoi(addr.substr(1 + idx));
netaddr = Addr(addr.substr(0, idx));
}
sockaddr_in saddr;
saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
saddr.sin_family = AF_INET;
saddr.sin_port = htons(port);
saddr.sin_port = 0;
auto idx = addr.find_first_of(':');
if(idx != std::string::npos)
{
Addr netaddr{addr.substr(0, idx), addr.substr(1 + idx)};
saddr.sin_addr.s_addr = netaddr.ton();
saddr.sin_port = htons(netaddr.port());
}
return _handler.ServeAsync(router->netloop(), router->logic(),
(const sockaddr*)&saddr);
}
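
The rewritten Start defaults to loopback with port 0 and only overrides the bind address when the configured string contains a host:port pair, with Addr doing the actual parsing. For illustration, a standalone sketch of the same split-and-fill flow using plain POSIX calls instead of llarp's Addr (ParseBindAddr is a hypothetical helper):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstdint>
#include <cstring>
#include <string>

// Fill a sockaddr_in from an optional "host:port" string; anything that
// does not parse falls back to 127.0.0.1 with an ephemeral port (0).
static sockaddr_in
ParseBindAddr(const std::string& addr)
{
  sockaddr_in saddr;
  std::memset(&saddr, 0, sizeof(saddr));
  saddr.sin_family = AF_INET;
  saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  saddr.sin_port = 0;

  const auto idx = addr.find_first_of(':');
  if(idx != std::string::npos)
  {
    const std::string host = addr.substr(0, idx);
    const int port = std::stoi(addr.substr(idx + 1));
    in_addr parsed;
    if(inet_pton(AF_INET, host.c_str(), &parsed) == 1)
    {
      saddr.sin_addr = parsed;
      saddr.sin_port = htons(static_cast< std::uint16_t >(port));
    }
  }
  return saddr;
}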

View File

@@ -733,11 +733,11 @@ namespace llarp
{
if(msg->R.size())
{
auto* job = new llarp_async_verify_rc;
job->nodedb = Router()->nodedb();
job->cryptoworker = Router()->threadpool();
job->diskworker = Router()->diskworker();
job->logic = Router()->logic();
llarp_async_verify_rc* job = new llarp_async_verify_rc();
job->nodedb = Router()->nodedb();
job->cryptoworker = Router()->threadpool();
job->diskworker = Router()->diskworker();
job->logic = Router()->logic();
job->hook = std::bind(&Endpoint::HandleVerifyGotRouter, this, msg,
std::placeholders::_1);
job->rc = msg->R[0];
@@ -1009,9 +1009,7 @@ namespace llarp
if(lookups.count(remote) >= MaxConcurrentLookups)
{
LogWarn(Name(), " has too many pending service lookups for ",
remote.ToString());
return false;
path = PickRandomEstablishedPath();
}
using namespace std::placeholders;
@@ -1263,7 +1261,7 @@ namespace llarp
}
m_state->m_PendingTraffic.erase(r);
},
5000);
5000, false);
}
bool

View File

@@ -36,7 +36,7 @@ namespace llarp
/// determine if this request has timed out
bool
IsTimedOut(llarp_time_t now, llarp_time_t timeout = 60000) const
IsTimedOut(llarp_time_t now, llarp_time_t timeout = 20000) const
{
if(now <= m_created)
return false;

View File

@@ -48,7 +48,7 @@ namespace llarp
{
SwapIntros();
}
UpdateIntroSet(true);
UpdateIntroSet(false);
}
return true;
}
@@ -196,8 +196,8 @@ namespace llarp
m_Endpoint->RouterLogic(), remoteIdent, m_Endpoint->GetIdentity(),
currentIntroSet.K, remoteIntro, m_DataHandler, currentConvoTag, t);
ex->hook =
std::bind(&OutboundContext::Send, this, std::placeholders::_1, path);
ex->hook = std::bind(&OutboundContext::Send, shared_from_this(),
std::placeholders::_1, path);
ex->msg.PutBuffer(payload);
ex->msg.introReply = path->intro;
@@ -236,7 +236,8 @@ namespace llarp
if(path)
{
HiddenServiceAddressLookup* job = new HiddenServiceAddressLookup(
m_Endpoint, util::memFn(&OutboundContext::OnIntroSetUpdate, this),
m_Endpoint,
util::memFn(&OutboundContext::OnIntroSetUpdate, shared_from_this()),
addr, m_Endpoint->GenTXID());
updatingIntroSet = job->SendRequestViaPath(path, m_Endpoint->Router());
@@ -300,7 +301,7 @@ namespace llarp
}
if(currentIntroSet.HasExpiredIntros(now))
{
UpdateIntroSet(true);
UpdateIntroSet(false);
}
// send control message if we look too quiet
if(lastGoodSend)
@@ -412,7 +413,7 @@ namespace llarp
{
// update introset
LogInfo(Name(), " updating introset");
UpdateIntroSet(true);
UpdateIntroSet(false);
return true;
}
return false;
@@ -487,7 +488,7 @@ namespace llarp
OutboundContext::HandlePathDied(path::Path_ptr path)
{
// unconditionally update introset
UpdateIntroSet(true);
UpdateIntroSet(false);
const RouterID endpoint(path->Endpoint());
// if a path to our current intro died...
if(endpoint == remoteIntro.router)
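
The hooks queued by OutboundContext are now bound to shared_from_this() rather than a raw this, so the context stays alive until every deferred callback has run even if the owning endpoint drops it in the meantime. A minimal sketch of that keep-alive pattern, with a toy work queue in place of the logic thread:

#include <cstdio>
#include <functional>
#include <memory>
#include <queue>

// Toy deferred-work queue standing in for the router's logic thread.
static std::queue< std::function< void() > > pending;

struct Context : std::enable_shared_from_this< Context >
{
  int id;

  explicit Context(int i) : id(i)
  {
  }

  void
  QueueWork()
  {
    // The callback captures a shared_ptr, so the Context outlives any
    // external release until the work has actually run.
    auto self = shared_from_this();
    pending.push([self] { std::printf("context %d still alive\n", self->id); });
  }
};

int
main()
{
  auto ctx = std::make_shared< Context >(1);
  ctx->QueueWork();
  ctx.reset();  // drop the caller's reference before the work runs
  while(!pending.empty())
  {
    pending.front()();
    pending.pop();
  }
  return 0;
}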