make format and revert stupid alloc changes

pull/617/head
Jeff Becker 5 years ago
parent 4862dec4be
commit 73f1d34239
No known key found for this signature in database
GPG Key ID: F357B3B42F6F9B05

@ -653,7 +653,8 @@ namespace llarp
{
llarp::LogError(
"not enough dht nodes to handle exploritory router lookup, "
"have ", nodeCount, " dht peers");
"have ",
nodeCount, " dht peers");
return false;
}
for(const auto& f : found)

@ -164,7 +164,7 @@ llarp_ev_add_tun(struct llarp_ev_loop *loop, struct llarp_tun_io *tun)
llarp::LogDebug("IfAddr: ", tun->ifaddr);
llarp::LogDebug("IfName: ", tun->ifname);
llarp::LogDebug("IfNMsk: ", tun->netmask);
#ifndef _WIN32
#ifndef _WIN32
auto dev = loop->create_tun(tun);
tun->impl = dev;
if(dev)
@ -173,7 +173,7 @@ llarp_ev_add_tun(struct llarp_ev_loop *loop, struct llarp_tun_io *tun)
}
#else
UNREFERENCED_PARAMETER(loop);
auto dev = new win32_tun_io(tun);
auto dev = new win32_tun_io(tun);
tun->impl = dev;
// We're not even going to add this to the socket event loop
if(dev)
@ -181,7 +181,7 @@ llarp_ev_add_tun(struct llarp_ev_loop *loop, struct llarp_tun_io *tun)
dev->setup();
return dev->add_ev(); // start up tun and add to event queue
}
#endif
#endif
llarp::LogWarn("Loop could not create tun");
return false;
}
@ -194,7 +194,7 @@ llarp_ev_tun_async_write(struct llarp_tun_io *tun, const llarp_buffer_t &buf)
llarp::LogWarn("packet too big, ", buf.sz, " > ", EV_WRITE_BUF_SZ);
return false;
}
#ifndef _WIN32
#ifndef _WIN32
return static_cast< llarp::tun * >(tun->impl)->queue_write(buf.base, buf.sz);
#else
return static_cast< win32_tun_io * >(tun->impl)->queue_write(buf.base,

@ -258,7 +258,7 @@ namespace llarp
Pending m_Pending GUARDED_BY(m_PendingMutex);
};
using LinkLayer_ptr = std::shared_ptr<ILinkLayer>;
using LinkLayer_ptr = std::shared_ptr< ILinkLayer >;
} // namespace llarp
#endif

@ -116,7 +116,7 @@ struct TryConnectJob
};
static void
on_try_connecting(std::shared_ptr<TryConnectJob> j)
on_try_connecting(std::shared_ptr< TryConnectJob > j)
{
if(j->Attempt())
j->router->pendingEstablishJobs.erase(j->rc.pubkey);
@ -191,8 +191,8 @@ namespace llarp
{
if(!link->IsCompatable(remote))
continue;
std::shared_ptr< TryConnectJob > job = std::make_shared< TryConnectJob >(
remote, link, numretries, this);
std::shared_ptr< TryConnectJob > job =
std::make_shared< TryConnectJob >(remote, link, numretries, this);
auto itr = pendingEstablishJobs.emplace(remote.pubkey, job);
if(itr.second)
{
@ -1961,8 +1961,8 @@ namespace llarp
if(outboundLinks.size() > 0)
return true;
static std::list< std::function< LinkLayer_ptr(Router *) > >
linkFactories = {utp::NewServerFromRouter, iwp::NewServerFromRouter};
static std::list< std::function< LinkLayer_ptr(Router *) > > linkFactories =
{utp::NewServerFromRouter, iwp::NewServerFromRouter};
for(const auto &factory : linkFactories)
{

@ -1211,8 +1211,8 @@ namespace llarp
const auto dlt = intro.expiresAt - now;
return should
|| ( // try spacing tunnel builds out evenly in time
(dlt <= (path::default_lifetime / 4)) &&
(NumInStatus(path::ePathBuilding) < m_NumPaths));
(dlt <= (path::default_lifetime / 4))
&& (NumInStatus(path::ePathBuilding) < m_NumPaths));
}
Logic*

@ -1,116 +0,0 @@
#ifndef LLARP_UTIL_ALLOC_HPP
#define LLARP_UTIL_ALLOC_HPP
#include <bitset>
#include <array>
#include <cstddef>  // std::size_t
#include <cstdint>  // uint8_t
#include <new>      // std::bad_alloc

namespace llarp
{
  namespace util
  {
    /// simple single threaded allocatable super type template
    ///
    /// NOTE(review): the fixed-size backing store (Memory) is currently
    /// disabled — `mem` is never allocated and NewPtr/DelPtr fall back to
    /// plain new/delete, so the pool never reports itself full.
    template < typename Value_t, std::size_t maxEntries >
    struct AllocPool
    {
      using Ptr_t = Value_t *;

      AllocPool()
      {
        // pool backing intentionally not allocated; see NewPtr/DelPtr
        mem = nullptr;
      }

      ~AllocPool()
      {
        // delete mem;
      }

      /// allocate and default-construct one Value_t
      /// (heap fallback while the pool is disabled)
      Ptr_t
      NewPtr()
      {
        /*
        Ptr_t ptr = mem->allocate();
        ::new(ptr) Value_t;
        return ptr;
        */
        return new Value_t();
      }

      /// destroy and release a pointer previously returned by NewPtr
      void
      DelPtr(Ptr_t p)
      {
        /*
        p->~Value_t();
        mem->deallocate(p);
        */
        delete p;
      }

      /// true when no further allocations are possible
      /// (always false while the heap fallback is in use)
      bool
      Full() const
      {
        /*
        return mem->full();
        */
        return false;
      }

      /// true when at least numItems more allocations would succeed
      /// (always true while the heap fallback is in use)
      bool
      HasRoomFor(size_t numItems) const
      {
        (void)numItems;
        return true;
        /* return mem->hasRoomFor(numItems); */
      }

     private:
      /// fixed-capacity slab with a bitmap of live slots; a cursor (_pos)
      /// scans round-robin for the next free slot.
      struct Memory
      {
        // alignas required: raw byte storage must satisfy Value_t's
        // alignment before placement-new is legal on it
        alignas(Value_t) uint8_t _buffer[maxEntries * sizeof(Value_t)];
        std::bitset< maxEntries > _allocated = {0};
        std::size_t _pos                     = 0;

        bool
        full() const
        {
          return _allocated.size() == _allocated.count();
        }

        bool
        hasRoomFor(size_t num) const
        {
          return _allocated.count() + num <= _allocated.size();
        }

        /// mark the slot containing ptr as free; throws on nullptr since
        /// that indicates caller misuse rather than exhaustion
        void
        deallocate(void *ptr)
        {
          if(ptr == nullptr)
            throw std::bad_alloc();
          uint8_t *v_ptr         = (uint8_t *)ptr;
          const std::size_t _idx = (v_ptr - _buffer) / sizeof(Value_t);
          _allocated.reset(_idx);
        }

        /// claim the next free slot, scanning from _pos; throws
        /// std::bad_alloc when every slot is taken
        [[nodiscard]] Ptr_t
        allocate()
        {
          const std::size_t _started = _pos;
          while(_allocated.test(_pos))
          {
            _pos = (_pos + 1) % maxEntries;
            if(_pos == _started)
            {
              // wrapped all the way around: we are full
              throw std::bad_alloc();
            }
          }
          _allocated.set(_pos);
          return (Ptr_t)&_buffer[_pos * sizeof(Value_t)];
        }
      };

      Memory *mem;
    };
  }  // namespace util
}  // namespace llarp
#endif

@ -1,23 +1,23 @@
#include <utp/inbound_message.hpp>
#include <string.h>
#include <cstring>
namespace llarp
{
namespace utp
{
bool
_InboundMessage::IsExpired(llarp_time_t now) const
InboundMessage::IsExpired(llarp_time_t now) const
{
return now > lastActive && now - lastActive >= 2000;
}
bool
_InboundMessage::AppendData(const byte_t* ptr, uint16_t sz)
InboundMessage::AppendData(const byte_t* ptr, uint16_t sz)
{
if(buffer.size_left() < sz)
return false;
memcpy(buffer.cur, ptr, sz);
std::copy_n(ptr, sz, buffer.cur);
buffer.cur += sz;
return true;
}

@ -6,10 +6,7 @@
#include <util/types.hpp>
#include <utp_types.h> // for uint32
#include <string.h>
#include <util/alloc.hpp>
#include <cstring>
namespace llarp
{
@ -48,7 +45,7 @@ namespace llarp
using MessageBuffer = AlignedBuffer< MAX_LINK_MSG_SIZE >;
/// pending inbound message being received
struct _InboundMessage
struct InboundMessage
{
/// timestamp of last activity
llarp_time_t lastActive;
@ -69,19 +66,22 @@ namespace llarp
bool
AppendData(const byte_t* ptr, uint16_t sz);
_InboundMessage() : lastActive(0), _msg(), buffer(_msg)
InboundMessage() : lastActive(0), _msg(), buffer(_msg)
{
}
InboundMessage(const InboundMessage& other)
: lastActive(other.lastActive), _msg(other._msg), buffer(_msg)
{
}
};
inline bool
operator==(const _InboundMessage& lhs, const _InboundMessage& rhs)
operator==(const InboundMessage& lhs, const InboundMessage& rhs)
{
return lhs.buffer.base == rhs.buffer.base;
}
using InboundMessage = std::shared_ptr< _InboundMessage >;
} // namespace utp
} // namespace llarp

@ -9,12 +9,6 @@ namespace llarp
{
namespace utp
{
using SendBufferPool = util::AllocPool< FragmentBuffer, 1024 * 4 >;
using RecvBufferPool = util::AllocPool< _InboundMessage, 1024 >;
static SendBufferPool OBPool;
static RecvBufferPool IBPool;
using namespace std::placeholders;
void
@ -81,7 +75,7 @@ namespace llarp
auto itr = m_RecvMsgs.begin();
while(itr != m_RecvMsgs.end())
{
if(itr->second->IsExpired(now))
if(itr->second.IsExpired(now))
{
itr = m_RecvMsgs.erase(itr);
}
@ -275,12 +269,7 @@ namespace llarp
// this means we're stalled
return false;
}
size_t sz = buf.sz;
if(!OBPool.HasRoomFor(sz / FragmentBodyPayloadSize))
{
LogError("Send buffers are full");
return false;
}
size_t sz = buf.sz;
byte_t* ptr = buf.base;
uint32_t msgid = m_NextTXMsgID++;
while(sz)
@ -380,15 +369,14 @@ namespace llarp
uint16_t remaining)
{
sendq.emplace_back(OBPool.NewPtr(),
[](FragmentBuffer* ptr) { OBPool.DelPtr(ptr); });
sendq.emplace_back();
auto& buf = sendq.back();
vecq.emplace_back();
auto& vec = vecq.back();
vec.iov_base = buf->data();
vec.iov_base = buf.data();
vec.iov_len = FragmentBufferSize;
buf->Randomize();
byte_t* noncePtr = buf->data() + FragmentHashSize;
buf.Randomize();
byte_t* noncePtr = buf.data() + FragmentHashSize;
byte_t* body = noncePtr + FragmentNonceSize;
byte_t* base = body;
AlignedBuffer< 24 > A(base);
@ -419,7 +407,7 @@ namespace llarp
payload.cur = payload.base;
payload.sz = FragmentBufferSize - FragmentHashSize;
// key'd hash
if(!OurCrypto()->hmac(buf->data(), payload, txKey))
if(!OurCrypto()->hmac(buf.data(), payload, txKey))
return false;
return MutateKey(txKey, A);
}
@ -556,22 +544,14 @@ namespace llarp
// get message
if(m_RecvMsgs.find(msgid) == m_RecvMsgs.end())
{
if(IBPool.Full())
{
LogError("inbound buffer mempool full");
return false;
}
m_RecvMsgs.emplace(
msgid, InboundMessage(IBPool.NewPtr(), [](_InboundMessage* m) {
IBPool.DelPtr(m);
}));
m_RecvMsgs.emplace(msgid, InboundMessage());
}
auto itr = m_RecvMsgs.find(msgid);
// add message activity
itr->second->lastActive = parent->Now();
itr->second.lastActive = parent->Now();
// append data
if(!itr->second->AppendData(out.cur, length))
if(!itr->second.AppendData(out.cur, length))
{
LogError("inbound buffer is full");
return false; // not enough room
@ -586,8 +566,8 @@ namespace llarp
if(remaining == 0)
{
// we done with this guy, prune next tick
itr->second->lastActive = 0;
ManagedBuffer buf(itr->second->buffer);
itr->second.lastActive = 0;
ManagedBuffer buf{itr->second.buffer};
// resize
buf.underlying.sz = buf.underlying.cur - buf.underlying.base;
// rewind

@ -14,8 +14,6 @@ namespace llarp
{
struct LinkLayer;
using SendFragmentBuffer = std::shared_ptr< FragmentBuffer >;
struct Session : public ILinkSession
{
/// remote router's rc
@ -44,7 +42,7 @@ namespace llarp
/// send queue for utp
std::deque< utp_iovec > vecq;
/// tx fragment queue
std::deque< SendFragmentBuffer > sendq;
std::deque< FragmentBuffer > sendq;
/// current rx fragment buffer
FragmentBuffer recvBuf;
/// current offset in current rx fragment buffer

@ -15,8 +15,8 @@ namespace llarp
SessionRenegotiateHandler reneg, SignBufferFunc sign,
TimeoutHandler timeout, SessionClosedHandler closed)
{
return std::make_shared< LinkLayer >(crypto, routerEncSecret, getrc, h, sign, est, reneg,
timeout, closed);
return std::make_shared< LinkLayer >(crypto, routerEncSecret, getrc, h,
sign, est, reneg, timeout, closed);
}
LinkLayer_ptr

Loading…
Cancel
Save