// lokinet/llarp/ev/ev_libuv.cpp

#include <ev/ev_libuv.hpp>
#include <util/thread/logic.hpp>
#include <util/thread/queue.hpp>

#include <cstring>

namespace libuv
{
#define LoopCall(h, ...) \
  {                      \
    auto __f = __VA_ARGS__; \
    __f();                  \
  }
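
  // NB: LoopCall's handle argument is unused; the macro just materializes the
  // callable and invokes it inline on the calling (event loop) thread.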

  struct glue
  {
    virtual ~glue() = default;

    virtual void
    Close() = 0;
  };
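
  // glue is the common base for every libuv handle wrapper below.
  // Loop::CloseAll() walks the loop's live handles and shuts each wrapper down
  // through Close(); each subclass deletes itself once its handles finish
  // closing.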

  /// tcp connection glue between llarp and libuv
  struct conn_glue : public glue
  {
    using WriteBuffer_t = std::vector<char>;

    struct WriteEvent
    {
      WriteBuffer_t data;
      uv_write_t request;

      WriteEvent() = default;

      explicit WriteEvent(size_t sz, char* ptr)
      {
        request.data = this;
        data.resize(sz);
        std::copy_n(ptr, sz, data.begin());
      }

      uv_buf_t
      Buffer()
      {
        return uv_buf_init(data.data(), data.size());
      }

      uv_write_t*
      Request()
      {
        return &request;
      }
    };
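
    // A WriteEvent is heap-allocated per WriteAsync() call; request.data
    // points back at it so OnWritten() can recover and delete it once libuv
    // reports the write complete.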

    uv_tcp_t m_Handle;
    uv_connect_t m_Connect;
    uv_check_t m_Ticker;
    llarp_tcp_connecter* const m_TCP;
    llarp_tcp_acceptor* const m_Accept;
    // zero-initialize: not every constructor sets all of the callbacks
    llarp_tcp_conn m_Conn = {};
    llarp::SockAddr m_Addr;

    conn_glue(uv_loop_t* loop, llarp_tcp_connecter* tcp, const llarp::SockAddr& addr)
        : m_TCP(tcp), m_Accept(nullptr), m_Addr(addr)
    {
      m_Connect.data = this;
      m_Handle.data = this;
      m_TCP->impl = this;
      uv_tcp_init(loop, &m_Handle);
      m_Ticker.data = this;
      uv_check_init(loop, &m_Ticker);
      m_Conn.close = &ExplicitClose;
      m_Conn.write = &ExplicitWrite;
    }

    conn_glue(uv_loop_t* loop, llarp_tcp_acceptor* tcp, const llarp::SockAddr& addr)
        : m_TCP(nullptr), m_Accept(tcp), m_Addr(addr)
    {
      m_Connect.data = nullptr;
      m_Handle.data = this;
      uv_tcp_init(loop, &m_Handle);
      m_Ticker.data = this;
      uv_check_init(loop, &m_Ticker);
      m_Accept->close = &ExplicitCloseAccept;
      m_Conn.write = nullptr;
      m_Conn.closed = nullptr;
      m_Conn.tick = nullptr;
    }

    conn_glue(conn_glue* parent) : m_TCP(nullptr), m_Accept(nullptr)
    {
      m_Connect.data = nullptr;
      m_Conn.close = &ExplicitClose;
      m_Conn.write = &ExplicitWrite;
      m_Handle.data = this;
      uv_tcp_init(parent->m_Handle.loop, &m_Handle);
      m_Ticker.data = this;
      uv_check_init(parent->m_Handle.loop, &m_Ticker);
    }

    static void
    OnOutboundConnect(uv_connect_t* c, int status)
    {
      conn_glue* self = static_cast<conn_glue*>(c->data);
      self->HandleConnectResult(status);
      c->data = nullptr;
    }

    bool
    ConnectAsync()
    {
      // libuv returns 0 on success and a negative error code on failure
      return uv_tcp_connect(&m_Connect, &m_Handle, m_Addr, &OnOutboundConnect) == 0;
    }

    static void
    ExplicitClose(llarp_tcp_conn* conn)
    {
      static_cast<conn_glue*>(conn->impl)->Close();
    }

    static void
    ExplicitCloseAccept(llarp_tcp_acceptor* tcp)
    {
      static_cast<conn_glue*>(tcp->impl)->Close();
    }

    static ssize_t
    ExplicitWrite(llarp_tcp_conn* conn, const byte_t* ptr, size_t sz)
    {
      return static_cast<conn_glue*>(conn->impl)->WriteAsync((char*)ptr, sz);
    }

    static void
    OnRead(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf)
    {
      if (nread >= 0)
      {
        auto* conn = static_cast<conn_glue*>(stream->data);
        conn->Read(buf->base, nread);
      }
      else
      {
        static_cast<conn_glue*>(stream->data)->Close();
      }
      delete[] buf->base;
    }

    static void
    Alloc(uv_handle_t*, size_t suggested_size, uv_buf_t* buf)
    {
      buf->base = new char[suggested_size];
      buf->len = suggested_size;
    }

    void
    Read(const char* ptr, ssize_t sz)
    {
      if (m_Conn.read)
      {
        llarp::LogDebug("tcp read ", sz, " bytes");
        const llarp_buffer_t buf(ptr, sz);
        m_Conn.read(&m_Conn, buf);
      }
    }

    void
    HandleConnectResult(int status)
    {
      if (m_TCP && m_TCP->connected)
      {
        if (status == 0)
        {
          m_Conn.impl = this;
          m_Conn.loop = m_TCP->loop;
          m_Conn.close = &ExplicitClose;
          m_Conn.write = &ExplicitWrite;
          m_TCP->connected(m_TCP, &m_Conn);
          Start();
        }
        else if (m_TCP->error)
        {
          llarp::LogError("failed to connect tcp ", uv_strerror(status));
          m_TCP->error(m_TCP);
        }
      }
    }

    void
    WriteFail()
    {
      if (m_Conn.close)
        m_Conn.close(&m_Conn);
    }

    uv_stream_t*
    Stream()
    {
      return (uv_stream_t*)&m_Handle;
    }

    static void
    OnWritten(uv_write_t* req, int status)
    {
      WriteEvent* ev = static_cast<WriteEvent*>(req->data);
      if (status == 0)
      {
        llarp::LogDebug("wrote ", ev->data.size());
      }
      else
      {
        llarp::LogDebug("write fail");
      }
      delete ev;
    }

    int
    WriteAsync(char* data, size_t sz)
    {
      if (uv_is_closing((const uv_handle_t*)&m_Handle))
        return -1;
      WriteEvent* ev = new WriteEvent(sz, data);
      auto buf = ev->Buffer();
      if (uv_write(ev->Request(), Stream(), &buf, 1, &OnWritten) == 0)
        return sz;
      delete ev;
      return -1;
    }

    static void
    OnClosed(uv_handle_t* h)
    {
      conn_glue* conn = static_cast<conn_glue*>(h->data);
      conn->HandleClosed();
    }

    static void
    FullClose(uv_handle_t* h)
    {
      auto* self = static_cast<conn_glue*>(h->data);
      h->data = nullptr;
      delete self;
      llarp::LogDebug("deleted");
    }

    void
    HandleClosed()
    {
      m_Handle.data = nullptr;
      if (m_Accept)
      {
        if (m_Accept->closed)
          m_Accept->closed(m_Accept);
        m_Accept->impl = nullptr;
      }
      if (m_Conn.closed)
      {
        m_Conn.closed(&m_Conn);
      }
      m_Conn.impl = nullptr;
      llarp::LogDebug("closed");
      // the tcp handle is fully closed; close the ticker next and delete
      // ourselves once that close completes
      uv_close((uv_handle_t*)&m_Ticker, &FullClose);
    }

    static void
    OnShutdown(uv_shutdown_t* shut, int code)
    {
      llarp::LogDebug("shut down ", code);
      auto* self = static_cast<conn_glue*>(shut->data);
      uv_close((uv_handle_t*)&self->m_Handle, &OnClosed);
      delete shut;
    }

    void
    Close() override
    {
      if (uv_is_closing((uv_handle_t*)Stream()))
        return;
      llarp::LogDebug("close tcp connection");
      uv_check_stop(&m_Ticker);
      uv_read_stop(Stream());
      auto* shut = new uv_shutdown_t();
      shut->data = this;
      uv_shutdown(shut, Stream(), &OnShutdown);
    }

    static void
    OnAccept(uv_stream_t* stream, int status)
    {
      if (status == 0)
      {
        conn_glue* conn = static_cast<conn_glue*>(stream->data);
        conn->Accept();
      }
      else
      {
        llarp::LogError("tcp accept failed: ", uv_strerror(status));
      }
    }

    static void
    OnTick(uv_check_t* t)
    {
      llarp::LogTrace("conn_glue::OnTick() start");
      conn_glue* conn = static_cast<conn_glue*>(t->data);
      conn->Tick();
      llarp::LogTrace("conn_glue::OnTick() end");
    }

    void
    Tick()
    {
      if (m_Accept && m_Accept->tick)
      {
        m_Accept->tick(m_Accept);
      }
      if (m_Conn.tick)
      {
        m_Conn.tick(&m_Conn);
      }
    }

    void
    Start()
    {
      auto result = uv_check_start(&m_Ticker, &OnTick);
      if (result)
        llarp::LogError("failed to start timer ", uv_strerror(result));
      result = uv_read_start(Stream(), &Alloc, &OnRead);
      if (result)
        llarp::LogError("failed to start reader ", uv_strerror(result));
    }

    void
    Accept()
    {
      if (m_Accept && m_Accept->accepted)
      {
        auto* child = new conn_glue(this);
        llarp::LogDebug("accepted new connection");
        child->m_Conn.impl = child;
        child->m_Conn.loop = m_Accept->loop;
        child->m_Conn.close = &ExplicitClose;
        child->m_Conn.write = &ExplicitWrite;
        auto res = uv_accept(Stream(), child->Stream());
        if (res)
        {
          llarp::LogError("failed to accept tcp connection ", uv_strerror(res));
          child->Close();
          return;
        }
        m_Accept->accepted(m_Accept, &child->m_Conn);
        child->Start();
      }
    }

    bool
    Server()
    {
      uv_check_start(&m_Ticker, &OnTick);
      m_Accept->close = &ExplicitCloseAccept;
      return uv_tcp_bind(&m_Handle, m_Addr, 0) == 0 && uv_listen(Stream(), 5, &OnAccept) == 0;
    }
  };
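
  // Usage sketch (hypothetical caller; `loop` and `addr` are assumed to already
  // exist): fill in an llarp_tcp_connecter and hand it to Loop::tcp_connect();
  // conn_glue owns the libuv handles from then on.
  //
  //   llarp_tcp_connecter tcp{};
  //   tcp.connected = [](llarp_tcp_connecter*, llarp_tcp_conn*) {
  //     llarp::LogInfo("tcp connected");
  //   };
  //   tcp.error = [](llarp_tcp_connecter*) { llarp::LogError("tcp connect failed"); };
  //   loop->tcp_connect(&tcp, addr);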

  struct ticker_glue : public glue
  {
    std::function<void(void)> func;

    ticker_glue(uv_loop_t* loop, std::function<void(void)> tick) : func(tick)
    {
      m_Ticker.data = this;
      uv_check_init(loop, &m_Ticker);
    }

    static void
    OnTick(uv_check_t* t)
    {
      llarp::LogTrace("ticker_glue::OnTick() start");
      ticker_glue* ticker = static_cast<ticker_glue*>(t->data);
      ticker->func();
      Loop* loop = static_cast<Loop*>(t->loop->data);
      loop->FlushLogic();
      llarp::LogTrace("ticker_glue::OnTick() end");
    }

    bool
    Start()
    {
      // libuv returns 0 on success and a negative error code on failure
      return uv_check_start(&m_Ticker, &OnTick) == 0;
    }

    void
    Close() override
    {
      uv_check_stop(&m_Ticker);
      uv_close((uv_handle_t*)&m_Ticker, [](auto h) {
        ticker_glue* self = (ticker_glue*)h->data;
        h->data = nullptr;
        delete self;
      });
    }

    uv_check_t m_Ticker;
  };
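
  // ticker_glue fires `func` once per event loop iteration (uv_check callbacks
  // run right after polling); Loop::add_ticker() installs one, and each pass
  // also drains any queued logic calls.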

  struct udp_glue : public glue
  {
    uv_udp_t m_Handle;
    uv_check_t m_Ticker;
    llarp_udp_io* const m_UDP;
    llarp::SockAddr m_Addr;
    std::vector<char> m_Buffer;

    udp_glue(uv_loop_t* loop, llarp_udp_io* udp, const llarp::SockAddr& src)
        : m_UDP(udp), m_Addr(src)
    {
      m_Handle.data = this;
      m_Ticker.data = this;
      uv_udp_init(loop, &m_Handle);
      uv_check_init(loop, &m_Ticker);
    }

    static void
    Alloc(uv_handle_t* h, size_t suggested_size, uv_buf_t* buf)
    {
      udp_glue* self = static_cast<udp_glue*>(h->data);
      if (self->m_Buffer.empty())
        self->m_Buffer.resize(suggested_size);
      buf->base = self->m_Buffer.data();
      buf->len = self->m_Buffer.size();
    }
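
    // Alloc hands libuv one reusable buffer instead of allocating per packet;
    // this works because OnRecv consumes each datagram synchronously before
    // libuv asks for a buffer again.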

    /// callback for libuv
    static void
    OnRecv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf, const sockaddr* addr, unsigned)
    {
      udp_glue* glue = static_cast<udp_glue*>(handle->data);
      if (addr)
        glue->RecvFrom(nread, buf, llarp::SockAddr(*addr));
    }

    void
    RecvFrom(ssize_t sz, const uv_buf_t* buf, const llarp::SockAddr& fromaddr)
    {
      if (sz > 0 && m_UDP)
      {
        const size_t pktsz = sz;
        if (m_UDP->recvfrom)
        {
          const llarp_buffer_t pkt((const byte_t*)buf->base, pktsz);
          m_UDP->recvfrom(m_UDP, fromaddr, ManagedBuffer{pkt});
        }
      }
    }

    static void
    OnTick(uv_check_t* t)
    {
      llarp::LogTrace("udp_glue::OnTick() start");
      udp_glue* udp = static_cast<udp_glue*>(t->data);
      udp->Tick();
      llarp::LogTrace("udp_glue::OnTick() end");
    }

    void
    Tick()
    {
      if (m_UDP && m_UDP->tick)
        m_UDP->tick(m_UDP);
    }

    static int
    SendTo(llarp_udp_io* udp, const llarp::SockAddr& to, const byte_t* ptr, size_t sz)
    {
      auto* self = static_cast<udp_glue*>(udp->impl);
      if (self == nullptr)
        return -1;
      auto buf = uv_buf_init((char*)ptr, sz);
      return uv_udp_try_send(&self->m_Handle, &buf, 1, to);
    }
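
    // uv_udp_try_send() sends synchronously: it returns the number of bytes
    // sent on success or a negative libuv error code, and never queues the
    // packet for later delivery.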

    bool
    Bind()
    {
      auto ret = uv_udp_bind(&m_Handle, m_Addr, 0);
      if (ret)
      {
        llarp::LogError("failed to bind to ", m_Addr, " ", uv_strerror(ret));
        return false;
      }
      if (uv_udp_recv_start(&m_Handle, &Alloc, &OnRecv))
      {
        llarp::LogError("failed to start recving packets via ", m_Addr);
        return false;
      }
      if (uv_check_start(&m_Ticker, &OnTick))
      {
        llarp::LogError("failed to start ticker");
        return false;
      }
#if !(defined(_WIN32) || defined(_WIN64))
      if (uv_fileno((const uv_handle_t*)&m_Handle, &m_UDP->fd))
        return false;
#endif
      m_UDP->sendto = &SendTo;
      m_UDP->impl = this;
      return true;
    }

    static void
    OnClosed(uv_handle_t* h)
    {
      auto* glue = static_cast<udp_glue*>(h->data);
      if (glue)
      {
        h->data = nullptr;
        delete glue;
      }
    }

    void
    Close() override
    {
      m_UDP->impl = nullptr;
      uv_check_stop(&m_Ticker);
      uv_close((uv_handle_t*)&m_Handle, &OnClosed);
    }
  };

  struct pipe_glue : public glue
  {
    byte_t m_Buffer[1024 * 8];
    llarp_ev_pkt_pipe* const m_Pipe;

    pipe_glue(uv_loop_t* loop, llarp_ev_pkt_pipe* pipe) : m_Pipe(pipe)
    {
      m_Handle.data = this;
      m_Ticker.data = this;
      uv_poll_init(loop, &m_Handle, m_Pipe->fd);
      uv_check_init(loop, &m_Ticker);
    }

    void
    Tick()
    {
      LoopCall(&m_Handle, std::bind(&llarp_ev_pkt_pipe::tick, m_Pipe));
    }

    static void
    OnRead(uv_poll_t* handle, int status, int)
    {
      if (status)
      {
        return;
      }
      pipe_glue* glue = static_cast<pipe_glue*>(handle->data);
      int r = glue->m_Pipe->read(glue->m_Buffer, sizeof(glue->m_Buffer));
      if (r <= 0)
        return;
      const llarp_buffer_t buf{glue->m_Buffer, static_cast<size_t>(r)};
      glue->m_Pipe->OnRead(buf);
    }

    static void
    OnClosed(uv_handle_t* h)
    {
      auto* self = static_cast<pipe_glue*>(h->data);
      if (self)
      {
        h->data = nullptr;
        delete self;
      }
    }

    void
    Close() override
    {
      uv_check_stop(&m_Ticker);
      uv_close((uv_handle_t*)&m_Handle, &OnClosed);
    }

    static void
    OnTick(uv_check_t* h)
    {
      llarp::LogTrace("pipe_glue::OnTick() start");
      pipe_glue* pipe = static_cast<pipe_glue*>(h->data);
      LoopCall(h, std::bind(&pipe_glue::Tick, pipe));
      llarp::LogTrace("pipe_glue::OnTick() end");
    }

    bool
    Start()
    {
      if (uv_poll_start(&m_Handle, UV_READABLE, &OnRead))
        return false;
      if (uv_check_start(&m_Ticker, &OnTick))
        return false;
      return true;
    }

    uv_poll_t m_Handle;
    uv_check_t m_Ticker;
  };
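
  // pipe_glue polls the packet pipe's fd for readability and performs at most
  // one 8 KiB read per wakeup, forwarding whatever arrived to the pipe's
  // OnRead().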

#if !(defined(_WIN32) || defined(_WIN64))
  struct tun_glue : public glue
  {
    uv_poll_t m_Handle;
    uv_check_t m_Ticker;
    llarp_tun_io* const m_Tun;
    device* const m_Device;
    byte_t m_Buffer[1500];
    bool readpkt;

    tun_glue(llarp_tun_io* tun) : m_Tun(tun), m_Device(tuntap_init())
    {
      m_Handle.data = this;
      m_Ticker.data = this;
      readpkt = false;
    }

    ~tun_glue() override
    {
      tuntap_destroy(m_Device);
    }

    static void
    OnTick(uv_check_t* timer)
    {
      llarp::LogTrace("tun_glue::OnTick() start");
      tun_glue* tun = static_cast<tun_glue*>(timer->data);
      tun->Tick();
      llarp::LogTrace("tun_glue::OnTick() end");
    }

    static void
    OnPoll(uv_poll_t* h, int, int events)
    {
      if (events & UV_READABLE)
      {
        static_cast<tun_glue*>(h->data)->Read();
      }
    }

    void
    Read()
    {
      auto sz = tuntap_read(m_Device, m_Buffer, sizeof(m_Buffer));
      if (sz > 0)
      {
        llarp::LogDebug("tun read ", sz);
        const llarp_buffer_t pkt(m_Buffer, sz);
        if (m_Tun && m_Tun->recvpkt)
          m_Tun->recvpkt(m_Tun, pkt);
      }
    }

    void
    Tick()
    {
      if (m_Tun->before_write)
        m_Tun->before_write(m_Tun);
      if (m_Tun->tick)
        m_Tun->tick(m_Tun);
    }

    static void
    OnClosed(uv_handle_t* h)
    {
      auto* self = static_cast<tun_glue*>(h->data);
      if (self)
      {
        h->data = nullptr;
        delete self;
      }
    }

    void
    Close() override
    {
      if (m_Tun->impl == nullptr)
        return;
      m_Tun->impl = nullptr;
      uv_check_stop(&m_Ticker);
      // close the ticker first, then the poll handle; OnClosed on the poll
      // handle finally deletes this instance
      uv_close((uv_handle_t*)&m_Ticker, [](uv_handle_t* h) {
        tun_glue* glue = static_cast<tun_glue*>(h->data);
        uv_close((uv_handle_t*)&glue->m_Handle, &OnClosed);
      });
    }

    bool
    Write(const byte_t* pkt, size_t sz)
    {
      return tuntap_write(m_Device, (void*)pkt, sz) != -1;
    }

    static bool
    WritePkt(llarp_tun_io* tun, const byte_t* pkt, size_t sz)
    {
      tun_glue* glue = static_cast<tun_glue*>(tun->impl);
      return glue && glue->Write(pkt, sz);
    }

    bool
    Init(uv_loop_t* loop)
    {
      memcpy(m_Device->if_name, m_Tun->ifname, sizeof(m_Device->if_name));
      if (tuntap_start(m_Device, TUNTAP_MODE_TUNNEL, 0) == -1)
      {
        llarp::LogError("failed to start up ", m_Tun->ifname);
        return false;
      }
      // copy back the interface name the OS actually assigned
      memcpy(m_Tun->ifname, m_Device->if_name, sizeof(m_Tun->ifname));
      if (tuntap_set_ip(m_Device, m_Tun->ifaddr, m_Tun->ifaddr, m_Tun->netmask) == -1)
      {
        llarp::LogError("failed to set address on ", m_Tun->ifname);
        return false;
      }
      if (tuntap_up(m_Device) == -1)
      {
        llarp::LogError("failed to put up ", m_Tun->ifname);
        return false;
      }
      if (m_Device->tun_fd == -1)
      {
        llarp::LogError("tun interface ", m_Tun->ifname, " has invalid fd: ", m_Device->tun_fd);
        return false;
      }
      tuntap_set_nonblocking(m_Device, 1);
      // libuv returns 0 on success and a negative error code on failure
      if (uv_poll_init(loop, &m_Handle, m_Device->tun_fd) != 0)
      {
        llarp::LogError("failed to start polling on ", m_Tun->ifname);
        return false;
      }
      if (uv_poll_start(&m_Handle, UV_READABLE, &OnPoll))
      {
        llarp::LogError("failed to start polling on ", m_Tun->ifname);
        return false;
      }
      if (uv_check_init(loop, &m_Ticker) != 0 || uv_check_start(&m_Ticker, &OnTick) != 0)
      {
        llarp::LogError("failed to set up tun interface timer for ", m_Tun->ifname);
        return false;
      }
      m_Tun->writepkt = &WritePkt;
      m_Tun->impl = this;
      return true;
    }
  };
#endif

  void
  Loop::FlushLogic()
  {
    llarp::LogTrace("Loop::FlushLogic() start");
    while (not m_LogicCalls.empty())
    {
      auto f = m_LogicCalls.popFront();
      f();
    }
    llarp::LogTrace("Loop::FlushLogic() end");
  }
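
  // FlushLogic() drains every callback queued via call_soon(); it only ever
  // runs on the event loop thread (from OnAsyncWake or a ticker pass).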

  static void
  OnAsyncWake(uv_async_t* async_handle)
  {
    llarp::LogTrace("OnAsyncWake, ticking event loop.");
    Loop* loop = static_cast<Loop*>(async_handle->data);
    loop->update_time();
    loop->process_timer_queue();
    loop->process_cancel_queue();
    loop->FlushLogic();
    auto& log = llarp::LogContext::Instance();
    if (log.logStream)
      log.logStream->Tick(loop->time_now());
  }

  Loop::Loop(size_t queue_size)
      : llarp_ev_loop(), m_LogicCalls(queue_size), m_timerQueue(20), m_timerCancelQueue(20)
  {}

  bool
  Loop::init()
  {
    // libuv returns 0 on success and a negative error code on failure
    if (uv_loop_init(&m_Impl) != 0)
      return false;
#ifdef LOKINET_DEBUG
    last_time = 0;
    loop_run_count = 0;
#endif
    m_Impl.data = this;
#if !(defined(_WIN32) || defined(_WIN64))
    uv_loop_configure(&m_Impl, UV_LOOP_BLOCK_SIGNAL, SIGPIPE);
#endif
    m_TickTimer = new uv_timer_t;
    m_TickTimer->data = this;
    if (uv_timer_init(&m_Impl, m_TickTimer) != 0)
      return false;
    m_Run.store(true);
    m_nextID.store(0);
    m_WakeUp.data = this;
    uv_async_init(&m_Impl, &m_WakeUp, &OnAsyncWake);
    return true;
  }

  void
  Loop::update_time()
  {
    llarp_ev_loop::update_time();
    uv_update_time(&m_Impl);
  }

  bool
  Loop::running() const
  {
    return m_Run.load();
  }

  bool
  Loop::tcp_connect(llarp_tcp_connecter* tcp, const llarp::SockAddr& addr)
  {
    auto* impl = new conn_glue(&m_Impl, tcp, addr);
    tcp->impl = impl;
    if (impl->ConnectAsync())
      return true;
    delete impl;
    tcp->impl = nullptr;
    return false;
  }

  static void
  OnTickTimeout(uv_timer_t* timer)
  {
    uv_stop(timer->loop);
  }

  int
  Loop::run()
  {
    llarp::LogTrace("Loop::run()");
    m_EventLoopThreadID = std::this_thread::get_id();
    return uv_run(&m_Impl, UV_RUN_DEFAULT);
  }

  int
  Loop::tick(int ms)
  {
    if (m_Run)
    {
#ifdef TESTNET_SPEED
      ms *= TESTNET_SPEED;
#endif
      uv_timer_start(m_TickTimer, &OnTickTimeout, ms, 0);
      uv_run(&m_Impl, UV_RUN_ONCE);
    }
    return 0;
  }
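
  // tick() runs a single bounded loop iteration: the one-shot timer stops the
  // loop after `ms` milliseconds, so the UV_RUN_ONCE call can never block
  // indefinitely waiting for i/o.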

  struct TimerData
  {
    Loop* loop;
    uint64_t job_id;
  };

  void
  CloseUVTimer(uv_timer_t* timer)
  {
    // have to delete the timer handle this way because libuv only releases a
    // handle after its close callback has run
    uv_timer_stop(timer);
    uv_close((uv_handle_t*)timer, [](uv_handle_t* handle) { delete (uv_timer_t*)handle; });
  }

  static void
  OnUVTimer(uv_timer_t* timer)
  {
    TimerData* timer_data = static_cast<TimerData*>(timer->data);
    Loop* loop = timer_data->loop;
    loop->do_timer_job(timer_data->job_id);
    delete timer_data;
    CloseUVTimer(timer);
  }

  uint32_t
  Loop::call_after_delay(llarp_time_t delay_ms, std::function<void(void)> callback)
  {
    llarp::LogTrace("Loop::call_after_delay()");
#ifdef TESTNET_SPEED
    delay_ms *= TESTNET_SPEED;
#endif
    PendingTimer timer;
    timer.delay_ms = delay_ms;
    timer.callback = callback;
    timer.job_id = m_nextID++;
    uint64_t job_id = timer.job_id;
    m_timerQueue.pushBack(std::move(timer));
    uv_async_send(&m_WakeUp);
    return job_id;
  }
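
  // Usage sketch (hypothetical caller; assumes chrono literals and a running
  // Loop `loop`): schedule a delayed call from any thread, then optionally
  // cancel it by id before it fires.
  //
  //   using namespace std::chrono_literals;
  //   const auto id = loop->call_after_delay(500ms, [] { llarp::LogInfo("fired"); });
  //   loop->cancel_delayed_call(id);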

  void
  Loop::cancel_delayed_call(uint32_t job_id)
  {
    m_timerCancelQueue.pushBack(job_id);
    uv_async_send(&m_WakeUp);
  }

  void
  Loop::process_timer_queue()
  {
    while (not m_timerQueue.empty())
    {
      PendingTimer job = m_timerQueue.popFront();
      uint64_t job_id = job.job_id;
      m_pendingCalls.emplace(job_id, std::move(job.callback));
      TimerData* timer_data = new TimerData;
      timer_data->loop = this;
      timer_data->job_id = job_id;
      uv_timer_t* newTimer = new uv_timer_t;
      newTimer->data = (void*)timer_data;
      uv_timer_init(&m_Impl, newTimer);
      uv_timer_start(newTimer, &OnUVTimer, job.delay_ms.count(), 0);
    }
  }

  void
  Loop::process_cancel_queue()
  {
    while (not m_timerCancelQueue.empty())
    {
      uint64_t job_id = m_timerCancelQueue.popFront();
      m_pendingCalls.erase(job_id);
    }
  }

  void
  Loop::do_timer_job(uint64_t job_id)
  {
    auto itr = m_pendingCalls.find(job_id);
    if (itr != m_pendingCalls.end())
    {
      if (itr->second)
        itr->second();
      m_pendingCalls.erase(itr->first);
    }
  }
void
Loop::stop()
{
if (m_Run)
2019-11-20 20:45:23 +01:00
{
llarp::LogInfo("stopping event loop");
CloseAll();
uv_stop(&m_Impl);
2019-11-20 20:45:23 +01:00
}
2019-06-02 23:17:05 +02:00
m_Run.store(false);
2019-06-20 20:28:26 +02:00
}

  void
  Loop::CloseAll()
  {
    llarp::LogInfo("Closing all handles");
    uv_walk(
        &m_Impl,
        [](uv_handle_t* h, void*) {
          if (uv_is_closing(h))
            return;
          if (h->data && uv_is_active(h) && h->type != UV_TIMER && h->type != UV_POLL)
          {
            auto glue = reinterpret_cast<libuv::glue*>(h->data);
            if (glue)
              glue->Close();
          }
        },
        nullptr);
  }
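
  // Timers and poll handles are deliberately skipped in the walk above: timers
  // are torn down via CloseUVTimer() and polls via deregister_poll_fd_readable(),
  // and neither stores a glue pointer in handle->data.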

  void
  Loop::stopped()
  {
    llarp::LogInfo("we have stopped");
  }

  bool
  Loop::udp_listen(llarp_udp_io* udp, const llarp::SockAddr& src)
  {
    auto* impl = new udp_glue(&m_Impl, udp, src);
    udp->impl = impl;
    if (impl->Bind())
    {
      return true;
    }
    llarp::LogError("Loop::udp_listen failed to bind");
    delete impl;
    // don't leave a dangling impl pointer behind on failure
    udp->impl = nullptr;
    return false;
  }

  bool
  Loop::add_ticker(std::function<void(void)> func)
  {
    auto* ticker = new ticker_glue(&m_Impl, func);
    if (ticker->Start())
    {
      return true;
    }
    delete ticker;
    return false;
  }

  bool
  Loop::udp_close(llarp_udp_io* udp)
  {
    if (udp == nullptr)
      return false;
    auto* glue = static_cast<udp_glue*>(udp->impl);
    if (glue == nullptr)
      return false;
    glue->Close();
    return true;
  }

  bool
  Loop::tun_listen(llarp_tun_io* tun)
  {
#if defined(_WIN32) || defined(_WIN64)
    (void)tun;
    return false;
#else
    auto* glue = new tun_glue(tun);
    tun->impl = glue;
    if (glue->Init(&m_Impl))
    {
      return true;
    }
    // don't leave a dangling impl pointer behind on failure
    tun->impl = nullptr;
    delete glue;
    return false;
#endif
  }

  bool
  Loop::tcp_listen(llarp_tcp_acceptor* tcp, const llarp::SockAddr& addr)
  {
    auto* glue = new conn_glue(&m_Impl, tcp, addr);
    tcp->impl = glue;
    if (glue->Server())
      return true;
    tcp->impl = nullptr;
    delete glue;
    return false;
  }

  bool
  Loop::add_pipe(llarp_ev_pkt_pipe* p)
  {
    auto* glue = new pipe_glue(&m_Impl, p);
    if (glue->Start())
      return true;
    delete glue;
    return false;
  }

  void
  Loop::call_soon(std::function<void(void)> f)
  {
    // before run() sets the loop thread id, queue best-effort; a full queue
    // silently drops the call here
    if (not m_EventLoopThreadID.has_value())
    {
      m_LogicCalls.tryPushBack(f);
      uv_async_send(&m_WakeUp);
      return;
    }
    const auto inEventLoop = *m_EventLoopThreadID == std::this_thread::get_id();
    // if we are on the loop thread and the queue is full, drain it in place so
    // the push below has room
    if (inEventLoop and m_LogicCalls.full())
    {
      FlushLogic();
    }
    m_LogicCalls.pushBack(f);
    uv_async_send(&m_WakeUp);
  }

  void
  OnUVPollFDReadable(uv_poll_t* handle, int status, [[maybe_unused]] int events)
  {
    if (status < 0)
      return;  // probably fd was closed
    auto func = static_cast<libuv::Loop::Callback*>(handle->data);
    (*func)();
  }

  void
  Loop::register_poll_fd_readable(int fd, Callback callback)
  {
    if (m_Polls.count(fd))
    {
      llarp::LogError(
          "Attempting to create event loop poll on fd ",
          fd,
          ", but an event loop poll for that fd already exists.");
      return;
    }
    // new a copy as the one passed in here will go out of scope
    auto function_ptr = new Callback(callback);
    auto& new_poll = m_Polls[fd];
    uv_poll_init(&m_Impl, &new_poll, fd);
    new_poll.data = (void*)function_ptr;
    uv_poll_start(&new_poll, UV_READABLE, &OnUVPollFDReadable);
  }
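
  // The heap-allocated Callback copy is owned by the poll handle through its
  // data pointer and is freed in deregister_poll_fd_readable(); a registration
  // that is never deregistered therefore leaks one Callback.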

  void
  Loop::deregister_poll_fd_readable(int fd)
  {
    auto itr = m_Polls.find(fd);
    if (itr != m_Polls.end())
    {
      uv_poll_stop(&(itr->second));
      auto func = static_cast<Callback*>(itr->second.data);
      delete func;
      m_Polls.erase(itr);
    }
  }

}  // namespace libuv