reduce the number of jobs pushed through the omq proxy thread.

when we queue cpu heavy work in lokinet to worker threads we make one
job per function call. we call a lot of jobs, so this change coalesces
them into one job that we push off at the end of the event loop cycle,
reducing the number of jobs going across the omq proxy thread and, in
theory, reducing cpu usage.
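
As a rough illustration of the idea (a standalone sketch, not lokinet code; the WorkerProxy and Batcher names are made up for this example), the change amounts to collecting the small jobs queued during one event loop cycle and handing them to the worker proxy as a single batched job:

```cpp
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for the omq proxy thread: every call to job() is one
// cross-thread handoff, which is the cost the commit tries to reduce.
struct WorkerProxy
{
  void
  job(std::function<void()> f)
  {
    std::cout << "proxy: received one job\n";
    f();
  }
};

struct Batcher
{
  WorkerProxy& proxy;
  std::vector<std::function<void()>> pending;

  // called many times per event loop cycle, on the loop thread only
  void
  queue_work(std::function<void()> f)
  {
    pending.push_back(std::move(f));
  }

  // called once at the end of the cycle: one proxy job for the whole batch
  void
  submit_work()
  {
    if (pending.empty())
      return;
    proxy.job([work = std::move(pending)]() {
      for (const auto& task : work)
        task();
    });
    pending.clear();
  }
};

int
main()
{
  WorkerProxy proxy;
  Batcher batcher{proxy};
  for (int i = 0; i < 5; ++i)
    batcher.queue_work([i] { std::cout << "task " << i << "\n"; });
  batcher.submit_work();  // one handoff instead of five
}
```

In the actual commit the batch lives in Router::m_WorkJobs, QueueWork() appends to it on the event loop thread, and an event-loop waker fires submit_work() to flush the whole batch as one omq job.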
pull/2134/head
Jeff Becker 1 year ago
parent 366d0c1be9
commit bbbb07d01d

@@ -72,6 +72,7 @@ namespace llarp
    _lastTick = llarp::time_now_ms();
    m_NextExploreAt = Clock_t::now();
    m_Pump = _loop->make_waker([this]() { PumpLL(); });
    m_Work = _loop->make_waker([this]() { submit_work(); });
  }
  Router::~Router()
@@ -79,6 +80,15 @@ namespace llarp
    llarp_dht_context_free(_dht);
  }
  void
  Router::submit_work()
  {
    m_lmq->job([work = std::move(m_WorkJobs)]() {
      for (const auto& job : work)
        job();
    });
  }
  void
  Router::PumpLL()
  {
@@ -1631,7 +1641,10 @@ namespace llarp
  void
  Router::QueueWork(std::function<void(void)> func)
  {
    m_lmq->job(std::move(func));
    _loop->call([this, func = std::move(func)]() mutable {
      m_WorkJobs.push_back(std::move(func));
      m_Work->Trigger();
    });
  }
  void

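A note on the design choice, as read from the diff (not stated in the commit message): QueueWork no longer hands each task to the worker pool directly; it bounces through _loop->call() so that m_WorkJobs is only ever touched from the event loop thread, which keeps the pending-job vector lock-free, and the m_Work waker then flushes the accumulated batch once per loop wakeup via submit_work().
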
@@ -78,6 +78,12 @@ namespace llarp
    path::BuildLimiter m_PathBuildLimiter;
    std::shared_ptr<EventLoopWakeup> m_Pump;
    std::shared_ptr<EventLoopWakeup> m_Work;
    std::vector<std::function<void()>> m_WorkJobs;
    /// submits cpu heavy work from last event loop tick cycle to worker threads.
    void
    submit_work();
    path::BuildLimiter&
    pathBuildLimiter() override
@@ -196,9 +202,11 @@ namespace llarp
      return _vpnPlatform.get();
    }
    /// queue functionally pure cpu heavy work to be done in another thread.
    void
    QueueWork(std::function<void(void)> func) override;
    /// queue disk io bound work to be done in the disk io thread.
    void
    QueueDiskIO(std::function<void(void)> func) override;
