mirror of https://github.com/oxen-io/lokinet
remove old dead code
This commit is contained in:
parent
6f46fe7e0c
commit
ae96458f8a
|
@ -1 +0,0 @@
|
|||
lokinet-bootserv
|
|
@ -1,29 +0,0 @@
|
|||
# replace your.server.tld with your server's fqdn
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name your.server.tld;
|
||||
location / {
|
||||
return 302 https://your.server.tld$request_uri;
|
||||
}
|
||||
location /.well-known/acme-challenge {
|
||||
root /var/www/letsencrypt;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name your.server.tld;
|
||||
ssl_certificate /etc/letsencrypt/live/your.server.tld/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/your.server.tld/privkey.pem;
|
||||
|
||||
location / {
|
||||
root /var/www/lokinet-bootserv;
|
||||
}
|
||||
|
||||
location /bootstrap.signed {
|
||||
include /etc/nginx/fastcgi_params;
|
||||
fastcgi_param SCRIPT_FILENAME /usr/local/bin/lokinet-bootserv;
|
||||
fastcgi_pass unix://tmp/cgi.sock;
|
||||
}
|
||||
}
|
|
@ -1,4 +0,0 @@
|
|||
|
||||
# set me to where the nodedb is for lokinet
|
||||
#[nodedb]
|
||||
#dir=/path/to/nodedb/
|
|
@ -1,20 +0,0 @@
|
|||
|
||||
SRC = $(sort $(wildcard src/*.cpp))
|
||||
OBJS = $(SRC:.cpp=.cpp.o)
|
||||
|
||||
CGI_EXE = lokinet-bootserv
|
||||
|
||||
CXX = clang++
|
||||
|
||||
all: build
|
||||
|
||||
build: $(CGI_EXE)
|
||||
|
||||
%.cpp.o: %.cpp
|
||||
$(CXX) -g -std=c++17 -c -Wall -Werror -Wpedantic $^ -o $^.o
|
||||
|
||||
$(CGI_EXE): $(OBJS)
|
||||
$(CXX) -o $(CGI_EXE) $^
|
||||
|
||||
clean:
|
||||
rm -f $(CGI_EXE) $(OBJS)
|
|
@ -1,35 +0,0 @@
|
|||
# lokinet-bootserv
|
||||
|
||||
cgi executable for serving a random RC for bootstrap from a nodedb
|
||||
|
||||
## configuring
|
||||
|
||||
copy the example config (privileged)
|
||||
|
||||
# cp configs/lokinet-bootserv.ini /usr/local/etc/lokinet-bootserv.ini
|
||||
|
||||
edit config to have proper values,
|
||||
specifically make sure the `[nodedb]` section has a `dir` value that points to a static copy of a healthy nodedb
|
||||
|
||||
## building
|
||||
|
||||
to build:
|
||||
|
||||
$ make
|
||||
|
||||
## installing (priviledged)
|
||||
|
||||
install cgi binary:
|
||||
|
||||
# cp lokinet-bootserv /usr/local/bin/lokinet-bootserv
|
||||
|
||||
set up with nginx cgi:
|
||||
|
||||
# cp configs/lokinet-bootserv-nginx.conf /etc/nginx/sites-available/lokinet-bootserv.conf
|
||||
# ln -s /etc/nginx/sites-available/lokinet-bootserv.conf /etc/nginx/sites-enabled/
|
||||
|
||||
## maintainence
|
||||
|
||||
add the following to crontab
|
||||
|
||||
0 0 * * * /usr/local/bin/lokinet-bootserv --cron
|
|
@ -1,171 +0,0 @@
|
|||
#include "lokinet-cgi.hpp"
|
||||
#include <fstream>
|
||||
#include <dirent.h>
|
||||
#include <list>
|
||||
#include <sstream>
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
CGIHandler::CGIHandler(std::ostream& o) : Handler(o)
|
||||
{
|
||||
}
|
||||
|
||||
CGIHandler::~CGIHandler()
|
||||
{
|
||||
}
|
||||
|
||||
int
|
||||
CGIHandler::Exec(const Config& conf)
|
||||
{
|
||||
const char* e = getenv("REQUEST_METHOD");
|
||||
if(e == nullptr)
|
||||
return ReportError("$REQUEST_METHOD not set");
|
||||
std::string_view method(e);
|
||||
|
||||
if(method != "GET")
|
||||
{
|
||||
out << "Content-Type: text/plain" << std::endl;
|
||||
out << "Status: 405 Method Not Allowed" << std::endl << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::string fname;
|
||||
if(!conf.VisitSection(
|
||||
"nodedb", [&](const Config::Section_t& sect) -> bool {
|
||||
auto itr = sect.find("dir");
|
||||
if(itr == sect.end())
|
||||
return false;
|
||||
fname = PickRandomFileInDir(
|
||||
std::string(itr->second.data(), itr->second.size()));
|
||||
return true;
|
||||
}))
|
||||
|
||||
return ReportError("bad values in nodedb section of config");
|
||||
if(fname.empty())
|
||||
{
|
||||
// no files in nodedb
|
||||
out << "Content-Type: text/plain" << std::endl;
|
||||
out << "Status: 404 Not Found" << std::endl << std::endl;
|
||||
return 0;
|
||||
}
|
||||
return ServeFile(fname.c_str(), "application/octect-stream");
|
||||
}
|
||||
|
||||
std::string
|
||||
CGIHandler::PickRandomFileInDir(std::string dirname) const
|
||||
{
|
||||
// collect files
|
||||
std::list< std::string > files;
|
||||
{
|
||||
DIR* d = opendir(dirname.c_str());
|
||||
if(d == nullptr)
|
||||
{
|
||||
return "";
|
||||
};
|
||||
std::list< std::string > subdirs;
|
||||
dirent* ent = nullptr;
|
||||
while((ent = readdir(d)))
|
||||
{
|
||||
std::string_view f = ent->d_name;
|
||||
if(f != "." && f != "..")
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << dirname;
|
||||
ss << '/';
|
||||
ss << f;
|
||||
subdirs.emplace_back(ss.str());
|
||||
}
|
||||
}
|
||||
closedir(d);
|
||||
for(const auto& subdir : subdirs)
|
||||
{
|
||||
d = opendir(subdir.c_str());
|
||||
if(d)
|
||||
{
|
||||
while((ent = readdir(d)))
|
||||
{
|
||||
std::string_view f;
|
||||
f = ent->d_name;
|
||||
if(f != "." && f != ".."
|
||||
&& f.find_last_of(".signed") != std::string_view::npos)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << subdir << "/" << f;
|
||||
files.emplace_back(ss.str());
|
||||
}
|
||||
}
|
||||
closedir(d);
|
||||
}
|
||||
}
|
||||
}
|
||||
uint32_t randint;
|
||||
{
|
||||
std::basic_ifstream< uint32_t > randf("/dev/urandom");
|
||||
if(!randf.is_open())
|
||||
return "";
|
||||
randf.read(&randint, 1);
|
||||
}
|
||||
auto itr = files.begin();
|
||||
if(files.size() > 1)
|
||||
std::advance(itr, randint % files.size());
|
||||
return *itr;
|
||||
}
|
||||
|
||||
int
|
||||
CGIHandler::ServeFile(const char* fname, const char* contentType) const
|
||||
{
|
||||
std::ifstream f(fname);
|
||||
if(f.is_open())
|
||||
{
|
||||
f.seekg(0, std::ios::end);
|
||||
auto sz = f.tellg();
|
||||
f.seekg(0, std::ios::beg);
|
||||
if(sz)
|
||||
{
|
||||
out << "Content-Type: " << contentType << std::endl;
|
||||
out << "Status: 200 OK" << std::endl;
|
||||
out << "Content-Length: " << std::to_string(sz) << std::endl
|
||||
<< std::endl;
|
||||
char buf[512] = {0};
|
||||
size_t r = 0;
|
||||
while((r = f.readsome(buf, sizeof(buf))) > 0)
|
||||
out.write(buf, r);
|
||||
out << std::flush;
|
||||
}
|
||||
else
|
||||
{
|
||||
out << "Content-Type: text/plain" << std::endl;
|
||||
out << "Status: 500 Internal Server Error" << std::endl << std::endl;
|
||||
out << "could not serve '" << fname << "' as it is an empty file"
|
||||
<< std::endl;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
out << "Content-Type: text/plain" << std::endl;
|
||||
out << "Status: 404 Not Found" << std::endl << std::endl;
|
||||
out << "could not serve '" << fname
|
||||
<< "' as it does not exist on the filesystem" << std::endl;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
CGIHandler::ReportError(const char* err)
|
||||
{
|
||||
out << "Content-Type: text/plain" << std::endl;
|
||||
out << "Status: 500 Internal Server Error" << std::endl << std::endl;
|
||||
out << err << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
Handler_ptr
|
||||
NewCGIHandler(std::ostream& out)
|
||||
{
|
||||
return std::make_unique< CGIHandler >(out);
|
||||
}
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
|
@ -1,43 +0,0 @@
|
|||
#ifndef LOKINET_BOOTSERV_HANDLER_HPP
|
||||
#define LOKINET_BOOTSERV_HANDLER_HPP
|
||||
#include <iostream>
|
||||
#include "lokinet-config.hpp"
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
struct Handler
|
||||
{
|
||||
Handler(std::ostream& o) : out(o){};
|
||||
|
||||
virtual ~Handler(){};
|
||||
|
||||
/// handle command
|
||||
/// return exit code
|
||||
virtual int
|
||||
Exec(const Config& conf) = 0;
|
||||
|
||||
/// report an error to system however that is done
|
||||
/// return exit code
|
||||
virtual int
|
||||
ReportError(const char* err) = 0;
|
||||
|
||||
protected:
|
||||
std::ostream& out;
|
||||
};
|
||||
|
||||
using Handler_ptr = std::unique_ptr< Handler >;
|
||||
|
||||
/// create cgi handler
|
||||
Handler_ptr
|
||||
NewCGIHandler(std::ostream& out);
|
||||
|
||||
/// create cron handler
|
||||
Handler_ptr
|
||||
NewCronHandler(std::ostream& out);
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
||||
|
||||
#endif
|
|
@ -1,31 +0,0 @@
|
|||
#ifndef BOOTSERV_LOKINET_CRON_HPP
|
||||
#define BOOTSERV_LOKINET_CRON_HPP
|
||||
|
||||
#include "handler.hpp"
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
struct CGIHandler final : public Handler
|
||||
{
|
||||
CGIHandler(std::ostream& o);
|
||||
~CGIHandler();
|
||||
|
||||
int
|
||||
Exec(const Config& conf) override;
|
||||
|
||||
int
|
||||
ReportError(const char* err) override;
|
||||
|
||||
int
|
||||
ServeFile(const char* fname, const char* mime) const;
|
||||
|
||||
std::string
|
||||
PickRandomFileInDir(std::string dirname) const;
|
||||
};
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
||||
|
||||
#endif
|
|
@ -1,132 +0,0 @@
|
|||
#include "lokinet-config.hpp"
|
||||
#include <fstream>
|
||||
#include <list>
|
||||
#include <iostream>
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
const char* Config::DefaultPath = "/usr/local/etc/lokinet-bootserv.ini";
|
||||
|
||||
bool
|
||||
Config::LoadFile(const char* fname)
|
||||
{
|
||||
{
|
||||
std::ifstream f(fname);
|
||||
if(!f.is_open())
|
||||
return false;
|
||||
f.seekg(0, std::ios::end);
|
||||
m_Data.resize(f.tellg());
|
||||
f.seekg(0, std::ios::beg);
|
||||
if(m_Data.size() == 0)
|
||||
return false;
|
||||
f.read(m_Data.data(), m_Data.size());
|
||||
}
|
||||
return Parse();
|
||||
}
|
||||
|
||||
void
|
||||
Config::Clear()
|
||||
{
|
||||
m_Config.clear();
|
||||
m_Data.clear();
|
||||
}
|
||||
|
||||
bool
|
||||
Config::Parse()
|
||||
{
|
||||
std::list< String_t > lines;
|
||||
{
|
||||
auto itr = m_Data.begin();
|
||||
// split into lines
|
||||
while(itr != m_Data.end())
|
||||
{
|
||||
auto beg = itr;
|
||||
while(itr != m_Data.end() && *itr != '\n' && *itr != '\r')
|
||||
++itr;
|
||||
lines.emplace_back(std::addressof(*beg), (itr - beg));
|
||||
if(itr == m_Data.end())
|
||||
break;
|
||||
++itr;
|
||||
}
|
||||
}
|
||||
|
||||
String_t sectName;
|
||||
|
||||
for(const auto& line : lines)
|
||||
{
|
||||
String_t realLine;
|
||||
auto comment = line.find_first_of(';');
|
||||
if(comment == String_t::npos)
|
||||
comment = line.find_first_of('#');
|
||||
if(comment == String_t::npos)
|
||||
realLine = line;
|
||||
else
|
||||
realLine = line.substr(0, comment);
|
||||
// blank or commented line?
|
||||
if(realLine.size() == 0)
|
||||
continue;
|
||||
// find delimiters
|
||||
auto sectOpenPos = realLine.find_first_of('[');
|
||||
auto sectClosPos = realLine.find_first_of(']');
|
||||
auto kvDelim = realLine.find_first_of('=');
|
||||
if(sectOpenPos != String_t::npos && sectClosPos != String_t::npos
|
||||
&& kvDelim == String_t::npos)
|
||||
{
|
||||
// section header
|
||||
|
||||
// clamp whitespaces
|
||||
++sectOpenPos;
|
||||
while(std::isspace(realLine[sectOpenPos])
|
||||
&& sectOpenPos != sectClosPos)
|
||||
++sectOpenPos;
|
||||
--sectClosPos;
|
||||
while(std::isspace(realLine[sectClosPos])
|
||||
&& sectClosPos != sectOpenPos)
|
||||
--sectClosPos;
|
||||
// set section name
|
||||
sectName = realLine.substr(sectOpenPos, sectClosPos);
|
||||
}
|
||||
else if(kvDelim != String_t::npos)
|
||||
{
|
||||
// key value pair
|
||||
String_t::size_type k_start = 0;
|
||||
String_t::size_type k_end = kvDelim;
|
||||
String_t::size_type v_start = kvDelim + 1;
|
||||
String_t::size_type v_end = realLine.size() - 1;
|
||||
// clamp whitespaces
|
||||
while(std::isspace(realLine[k_start]) && k_start != kvDelim)
|
||||
++k_start;
|
||||
while(std::isspace(realLine[k_end]) && k_end != k_start)
|
||||
--k_end;
|
||||
while(std::isspace(realLine[v_start]) && v_start != v_end)
|
||||
++v_start;
|
||||
while(std::isspace(realLine[v_end]))
|
||||
--v_end;
|
||||
|
||||
// sect.k = v
|
||||
String_t k = realLine.substr(k_start, k_end);
|
||||
String_t v = realLine.substr(v_start, v_end);
|
||||
Section_t& sect = m_Config[sectName];
|
||||
sect[k] = v;
|
||||
}
|
||||
else // malformed?
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
Config::VisitSection(
|
||||
const char* name,
|
||||
std::function< bool(const Section_t& sect) > visit) const
|
||||
{
|
||||
auto itr = m_Config.find(name);
|
||||
if(itr == m_Config.end())
|
||||
return false;
|
||||
return visit(itr->second);
|
||||
}
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
|
@ -1,47 +0,0 @@
|
|||
#ifndef LOKINET_BOOTSERV_CONFIG_HPP
|
||||
#define LOKINET_BOOTSERV_CONFIG_HPP
|
||||
#include <unordered_map>
|
||||
#include <string_view>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
struct Config
|
||||
{
|
||||
using String_t = std::string_view;
|
||||
using Section_t = std::unordered_map< String_t, String_t >;
|
||||
using Config_impl_t = std::unordered_map< String_t, Section_t >;
|
||||
|
||||
static const char* DefaultPath;
|
||||
|
||||
/// clear config
|
||||
void
|
||||
Clear();
|
||||
|
||||
/// load config file for bootserv
|
||||
/// return true on success
|
||||
/// return false on error
|
||||
bool
|
||||
LoadFile(const char* fname);
|
||||
|
||||
/// visit a section in config read only by name
|
||||
/// return false if no section or value propagated from visitor
|
||||
bool
|
||||
VisitSection(const char* name,
|
||||
std::function< bool(const Section_t&) > visit) const;
|
||||
|
||||
private:
|
||||
bool
|
||||
Parse();
|
||||
|
||||
std::vector< char > m_Data;
|
||||
Config_impl_t m_Config;
|
||||
};
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
||||
|
||||
#endif
|
|
@ -1,37 +0,0 @@
|
|||
#include "lokinet-cron.hpp"
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
CronHandler::CronHandler(std::ostream& o) : Handler(o)
|
||||
{
|
||||
}
|
||||
|
||||
CronHandler::~CronHandler()
|
||||
{
|
||||
}
|
||||
|
||||
int
|
||||
CronHandler::Exec(const Config& conf)
|
||||
{
|
||||
// this runs the cron tasks
|
||||
// TODO: implement me
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
CronHandler::ReportError(const char* err)
|
||||
{
|
||||
out << "error: " << err << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
Handler_ptr
|
||||
NewCronHandler(std::ostream& out)
|
||||
{
|
||||
return std::make_unique< CronHandler >(out);
|
||||
}
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
|
@ -1,25 +0,0 @@
|
|||
#ifndef BOOTSERV_LOKINET_CRON_HPP
|
||||
#define BOOTSERV_LOKINET_CRON_HPP
|
||||
|
||||
#include "handler.hpp"
|
||||
|
||||
namespace lokinet
|
||||
{
|
||||
namespace bootserv
|
||||
{
|
||||
struct CronHandler final : public Handler
|
||||
{
|
||||
CronHandler(std::ostream& o);
|
||||
~CronHandler();
|
||||
|
||||
int
|
||||
Exec(const Config& conf) override;
|
||||
|
||||
int
|
||||
ReportError(const char* err) override;
|
||||
};
|
||||
|
||||
} // namespace bootserv
|
||||
} // namespace lokinet
|
||||
|
||||
#endif
|
|
@ -1,60 +0,0 @@
|
|||
#include "handler.hpp"
|
||||
#include "lokinet-config.hpp"
|
||||
|
||||
#include <getopt.h>
|
||||
#include <string_view>
|
||||
#include <sstream>
|
||||
|
||||
static int
|
||||
printhelp(const char* exe)
|
||||
{
|
||||
std::cout << "usage: " << exe << " [--cron] [--conf /path/to/alt/config.ini]"
|
||||
<< std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char* argv[])
|
||||
{
|
||||
bool RunCron = false;
|
||||
|
||||
const char* confFile = lokinet::bootserv::Config::DefaultPath;
|
||||
lokinet::bootserv::Config config;
|
||||
|
||||
lokinet::bootserv::Handler_ptr handler;
|
||||
|
||||
option longopts[] = {{"cron", no_argument, 0, 'C'},
|
||||
{"help", no_argument, 0, 'h'},
|
||||
{"conf", required_argument, 0, 'c'},
|
||||
{0, 0, 0, 0}};
|
||||
|
||||
int c = 0;
|
||||
int index = 0;
|
||||
while((c = getopt_long(argc, argv, "hCc:", longopts, &index)) != -1)
|
||||
{
|
||||
switch(c)
|
||||
{
|
||||
case 'h':
|
||||
return printhelp(argv[0]);
|
||||
case 'C':
|
||||
RunCron = true;
|
||||
break;
|
||||
case 'c':
|
||||
confFile = optarg;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(RunCron)
|
||||
handler = lokinet::bootserv::NewCronHandler(std::cout);
|
||||
else
|
||||
handler = lokinet::bootserv::NewCGIHandler(std::cout);
|
||||
|
||||
if(!config.LoadFile(confFile))
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << "failed to load " << confFile;
|
||||
return handler->ReportError(ss.str().c_str());
|
||||
}
|
||||
else
|
||||
return handler->Exec(config);
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
|
||||
<!--
|
||||
Created by Manifold
|
||||
--><service_bundle type="manifest" name="lokinet">
|
||||
|
||||
<service name="site/lokinet" type="service" version="1">
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<dependency name="network" grouping="require_all" restart_on="error" type="service">
|
||||
<service_fmri value="svc:/milestone/network:default"/>
|
||||
</dependency>
|
||||
|
||||
<dependency name="filesystem" grouping="require_all" restart_on="error" type="service">
|
||||
<service_fmri value="svc:/system/filesystem/local"/>
|
||||
</dependency>
|
||||
|
||||
|
||||
<instance name="default" enabled="false">
|
||||
|
||||
|
||||
<method_context>
|
||||
<method_credential user="lokinet" group="lokinet"/>
|
||||
</method_context>
|
||||
|
||||
<exec_method type="method" name="start" exec="/usr/bin/lokinet %{config_file}" timeout_seconds="60"/>
|
||||
|
||||
<exec_method type="method" name="stop" exec="/usr/bin/kill -INT <<< `pgrep lokinet`" timeout_seconds="60"/>
|
||||
|
||||
<property_group name="startd" type="framework">
|
||||
<propval name="duration" type="astring" value="child"/>
|
||||
|
||||
|
||||
<propval name="ignore_error" type="astring" value="core,signal"/>
|
||||
</property_group>
|
||||
|
||||
<property_group name="application" type="application">
|
||||
<propval name="config_file" type="astring" value="/etc/loki/lokinet.ini"/>
|
||||
</property_group>
|
||||
|
||||
</instance>
|
||||
|
||||
|
||||
|
||||
<stability value="Evolving"/>
|
||||
|
||||
<template>
|
||||
<common_name>
|
||||
<loctext xml:lang="C">
|
||||
LokiNET: Anonymous Network layer thingydoo.
|
||||
</loctext>
|
||||
</common_name>
|
||||
</template>
|
||||
|
||||
</service>
|
||||
|
||||
</service_bundle>
|
|
@ -1,18 +0,0 @@
|
|||
#!/usr/sbin/dtrace -s
|
||||
|
||||
syscall:::entry
|
||||
/pid == $target/
|
||||
{
|
||||
@calls[ustack(10), probefunc] = count();
|
||||
}
|
||||
|
||||
profile:::tick-1sec
|
||||
{
|
||||
/** print */
|
||||
printa(@calls);
|
||||
/** clear */
|
||||
clear(@calls);
|
||||
trunc(@calls, 15);
|
||||
}
|
||||
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
. /etc/rc.subr
|
||||
|
||||
name=lokinet
|
||||
rcvar=lokinet_enable
|
||||
|
||||
command="/usr/local/bin/${name}"
|
||||
command_args="/usr/local/etc/${name}/daemon.ini > /dev/null 2>&1"
|
||||
|
||||
pidfile="/usr/local/etc/${name}/lokinet.pid"
|
||||
|
||||
required_files="/usr/local/etc/${name}/daemon.ini"
|
||||
|
||||
sig_reload="HUP"
|
||||
|
||||
start_precmd="${command} -g /usr/local/etc/${name}/daemon.ini"
|
||||
|
||||
load_rc_config $name
|
||||
run_rc_command "$1"
|
|
@ -1,79 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# requires python3-requests
|
||||
#
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
from collections import defaultdict as Dict
|
||||
|
||||
from requests.exceptions import RequestException
|
||||
|
||||
|
||||
def jsonrpc(method, **args):
|
||||
return requests.post('http://127.0.0.1:1190/', data=json.dumps(
|
||||
{'method': method, 'params': args, 'id': 'munin'}), headers={'content-type': 'application/json'}).json()
|
||||
|
||||
|
||||
def exit_sessions_main():
|
||||
if len(sys.argv) == 2 and sys.argv[1] == 'config':
|
||||
print("graph_title lokinet exit sessions")
|
||||
print("graph_vlabel sessions")
|
||||
print("graph_category network")
|
||||
print("graph_info This graph shows the number of exit sessions on a lokinet exit")
|
||||
print("_exit_sessions.info Number of exit sessions")
|
||||
print("_exit_sessions.label sessions")
|
||||
else:
|
||||
count = 0
|
||||
try:
|
||||
j = jsonrpc("llarp.admin.exit.list")
|
||||
count = len(j['result'])
|
||||
except RequestException:
|
||||
pass
|
||||
print("_exit_sessions.value {}".format(count))
|
||||
|
||||
|
||||
def peers_main():
|
||||
if len(sys.argv) == 2 and sys.argv[1] == 'config':
|
||||
print("graph_title lokinet peers")
|
||||
print("graph_vlabel peers")
|
||||
print("graph_category network")
|
||||
print("graph_info This graph shows the number of node to node sessions of this lokinet router")
|
||||
print("_peers_outbound.info Number of outbound lokinet peers")
|
||||
print("_peers_inbound.info Number of inbound lokinet peers")
|
||||
print("_peers_outbound.label outbound peers")
|
||||
print("_peers_inbound.label inbound peers")
|
||||
print("_peers_clients.info Number of lokinet client peers")
|
||||
print("_peers_clients.label lokinet client peers")
|
||||
else:
|
||||
inbound = Dict(int)
|
||||
outbound = Dict(int)
|
||||
clients = Dict(int)
|
||||
try:
|
||||
j = jsonrpc("llarp.admin.link.neighboors")
|
||||
for peer in j['result']:
|
||||
if peer["svcnode"]:
|
||||
if peer["outbound"]:
|
||||
outbound[peer['ident']] += 1
|
||||
else:
|
||||
inbound[peer['ident']] += 1
|
||||
else:
|
||||
clients[peer['ident']] += 1
|
||||
except RequestException:
|
||||
pass
|
||||
|
||||
print("_peers_outbound.value {}".format(len(outbound)))
|
||||
print("_peers_inbound.value {}".format(len(inbound)))
|
||||
print("_peers_clients.value {}".format(len(clients)))
|
||||
|
||||
if __name__ == '__main__':
|
||||
exe = os.path.basename(sys.argv[0]).lower()
|
||||
if exe == 'lokinet_peers':
|
||||
peers_main()
|
||||
elif exe == 'lokinet_exit':
|
||||
exit_sessions_main()
|
||||
else:
|
||||
print(
|
||||
'please symlink this as `lokinet_peers` or `lokinet_exit` in munin plugins dir')
|
|
@ -1,4 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from pylokinet.instance import main
|
||||
main()
|
|
@ -1,111 +0,0 @@
|
|||
#
|
||||
# super freaking dead simple wicked awesome bencode library
|
||||
#
|
||||
from io import BytesIO
|
||||
|
||||
class BCodec:
|
||||
encoding = 'utf-8'
|
||||
def __init__(self, fd):
|
||||
self._fd = fd
|
||||
|
||||
def _write_bytestring(self, bs):
|
||||
self._fd.write('{}:'.format(len(bs)).encode('ascii'))
|
||||
self._fd.write(bs)
|
||||
|
||||
def _write_list(self, l):
|
||||
self._fd.write(b'l')
|
||||
for item in l:
|
||||
self.encode(item)
|
||||
self._fd.write(b'e')
|
||||
|
||||
def _write_dict(self, d):
|
||||
self._fd.write(b'd')
|
||||
keys = list(d.keys())
|
||||
keys.sort()
|
||||
for k in keys:
|
||||
if isinstance(k, str):
|
||||
self._write_bytestring(k.encode(self.encoding))
|
||||
elif isinstance(k, bytes):
|
||||
self._write_bytestring(k)
|
||||
else:
|
||||
self._write_bytestring('{}'.format(k).encode(self.encoding))
|
||||
self.encode(d[k])
|
||||
self._fd.write(b'e')
|
||||
|
||||
def _write_int(self, i):
|
||||
self._fd.write('i{}e'.format(i).encode(self.encoding))
|
||||
|
||||
def encode(self, obj):
|
||||
if isinstance(obj, dict):
|
||||
self._write_dict(obj)
|
||||
elif isinstance(obj, list):
|
||||
self._write_list(obj)
|
||||
elif isinstance(obj, int):
|
||||
self._write_int(obj)
|
||||
elif isinstance(obj, str):
|
||||
self._write_bytestring(obj.encode(self.encoding))
|
||||
elif isinstance(obj, bytes):
|
||||
self._write_bytestring(obj)
|
||||
elif hasattr(obj, bencode):
|
||||
obj.bencode(self._fd)
|
||||
else:
|
||||
raise ValueError("invalid object type")
|
||||
|
||||
def _readuntil(self, delim):
|
||||
b = bytes()
|
||||
while True:
|
||||
ch = self._fd.read(1)
|
||||
if ch == delim:
|
||||
return b
|
||||
b += ch
|
||||
|
||||
def _decode_list(self):
|
||||
l = list()
|
||||
while True:
|
||||
b = self._fd.read(1)
|
||||
if b == b'e':
|
||||
return l
|
||||
l.append(self._decode(b))
|
||||
|
||||
def _decode_dict(self):
|
||||
d = dict()
|
||||
while True:
|
||||
ch = self._fd.read(1)
|
||||
if ch == b'e':
|
||||
return d
|
||||
k = self._decode_bytestring(ch)
|
||||
d[k] = self.decode()
|
||||
|
||||
def _decode_int(self):
|
||||
return int(self._readuntil(b'e'), 10)
|
||||
|
||||
def _decode_bytestring(self, ch):
|
||||
ch += self._readuntil(b':')
|
||||
l = int(ch, base=10)
|
||||
return self._fd.read(l)
|
||||
|
||||
def _decode(self, ch):
|
||||
if ch == b'd':
|
||||
return self._decode_dict()
|
||||
elif ch == b'l':
|
||||
return self._decode_list()
|
||||
elif ch == b'i':
|
||||
return self._decode_int()
|
||||
elif ch in [b'0',b'1',b'2',b'3',b'4',b'5',b'6',b'7',b'8',b'9']:
|
||||
return self._decode_bytestring(ch)
|
||||
else:
|
||||
raise ValueError(ch)
|
||||
|
||||
def decode(self):
|
||||
return self._decode(self._fd.read(1))
|
||||
|
||||
|
||||
def bencode(obj):
|
||||
buf = BytesIO()
|
||||
b = BCodec(buf)
|
||||
b.encode(obj)
|
||||
return buf.getvalue()
|
||||
|
||||
def bdecode(bytestring):
|
||||
buf = BytesIO(bytestring)
|
||||
return BCodec(buf).decode()
|
|
@ -1,278 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# python wsgi application for managing many lokinet instances
|
||||
#
|
||||
|
||||
__doc__ = """lokinet bootserv wsgi app
|
||||
also handles webhooks for CI
|
||||
run me with via gunicorn pylokinet.bootserv:app
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from pylokinet import rc
|
||||
import json
|
||||
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
from email.utils import parsedate, format_datetime
|
||||
from dateutil.parser import parse as date_parse
|
||||
import requests
|
||||
|
||||
|
||||
root = './lokinet'
|
||||
|
||||
def _compare_dates(left, right):
|
||||
"""
|
||||
return true if left timestamp is bigger than right
|
||||
"""
|
||||
return date_parse(left) > date_parse(right)
|
||||
|
||||
class TokenHolder:
|
||||
|
||||
_dir = root
|
||||
_token = None
|
||||
|
||||
def __init__(self, f="token"):
|
||||
if not os.path.exists(self._dir):
|
||||
os.mkdir(self._dir, 0o700)
|
||||
f = os.path.join(self._dir, f)
|
||||
if os.path.exists(f):
|
||||
with open(f) as fd:
|
||||
self._token = fd.read().replace("\n", "")
|
||||
|
||||
def verify(self, token):
|
||||
"""
|
||||
return true if token matches
|
||||
"""
|
||||
if self._token is None:
|
||||
return False
|
||||
return self._token == token
|
||||
|
||||
class BinHolder:
|
||||
"""
|
||||
serves a binary file in a dir
|
||||
"""
|
||||
_dir = os.path.join(root, 'bin')
|
||||
|
||||
def __init__(self, f):
|
||||
if not os.path.exists(self._dir):
|
||||
os.mkdir(self._dir, 0o700)
|
||||
self._fpath = os.path.join(self._dir, f)
|
||||
|
||||
def put(self, r):
|
||||
"""
|
||||
put a new file into the place that is held
|
||||
"""
|
||||
with open(self._fpath, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=1024):
|
||||
f.write(chunk)
|
||||
|
||||
|
||||
def is_new(self, date):
|
||||
"""
|
||||
return true if last modified timestamp is fresher than current
|
||||
"""
|
||||
t = date_parse('{}'.format(date))
|
||||
if not t:
|
||||
return False
|
||||
if os.path.exists(self._fpath):
|
||||
st = os.stat(self._fpath)
|
||||
return st.st_mtime < t.timestamp()
|
||||
return True
|
||||
|
||||
|
||||
def serve(self, last_modified, respond):
|
||||
"""
|
||||
serve file with caching
|
||||
"""
|
||||
t = parsedate(last_modified)
|
||||
if t:
|
||||
t = time.mktime(t)
|
||||
if t is None:
|
||||
t = 0
|
||||
if not os.path.exists(self._fpath):
|
||||
respond("404 Not Found", [])
|
||||
return []
|
||||
st = os.stat(self._fpath)
|
||||
if st.st_mtime < t:
|
||||
respond("304 Not Modified", [("Last-Modified", format_datetime(st.st_mtime)) ])
|
||||
return []
|
||||
with open(self._fpath, "rb") as f:
|
||||
data = f.read()
|
||||
respond("200 OK", [("Content-Type", "application/octect-stream"),
|
||||
("Last-Modified", format_datetime(datetime.fromtimestamp(int(st.st_mtime)))),("Content-Length", "{}".format(st.st_size))])
|
||||
return [data]
|
||||
|
||||
|
||||
class RCHolder:
|
||||
|
||||
_dir = os.path.join(root, 'nodedb')
|
||||
|
||||
_rc_files = list()
|
||||
|
||||
def __init__(self):
|
||||
if os.path.exists(self._dir):
|
||||
for root, _, files in os.walk(self._dir):
|
||||
for f in files:
|
||||
self._add_rc(os.path.join(root, f))
|
||||
else:
|
||||
os.mkdir(self._dir, 0o700)
|
||||
|
||||
def prune(self):
|
||||
"""
|
||||
remove invalid entries
|
||||
"""
|
||||
delfiles = []
|
||||
for p in self._rc_files:
|
||||
with open(p, 'rb') as f:
|
||||
if not rc.validate(f.read()):
|
||||
delfiles.append(p)
|
||||
for f in delfiles:
|
||||
os.remove(f)
|
||||
|
||||
def validate_then_put(self, body):
|
||||
if not rc.validate(body):
|
||||
return False
|
||||
k = rc.get_pubkey(body)
|
||||
print(k)
|
||||
if k is None:
|
||||
return False
|
||||
with open(os.path.join(self._dir, k), "wb") as f:
|
||||
f.write(body)
|
||||
return True
|
||||
|
||||
def _add_rc(self, fpath):
|
||||
self._rc_files.append(fpath)
|
||||
|
||||
def serve_random(self):
|
||||
with open(random.choice(self._rc_files), 'rb') as f:
|
||||
return f.read()
|
||||
|
||||
def empty(self):
|
||||
return len(self._rc_files) == 0
|
||||
|
||||
|
||||
def handle_rc_upload(body, respond):
|
||||
holder = RCHolder()
|
||||
if holder.validate_then_put(body):
|
||||
respond("200 OK", [("Content-Type", "text/plain")])
|
||||
return ["rc accepted".encode('ascii')]
|
||||
else:
|
||||
respond("400 Bad Request", [("Content-Type", "text/plain")])
|
||||
return ["bad rc".encode('ascii')]
|
||||
|
||||
|
||||
def serve_random_rc():
|
||||
holder = RCHolder()
|
||||
if holder.empty():
|
||||
return None
|
||||
else:
|
||||
return holder.serve_random()
|
||||
|
||||
def response(status, msg, respond):
|
||||
respond(status, [("Content-Type", "text/plain"), ("Content-Length", "{}".format(len(msg)))])
|
||||
return [msg.encode("utf-8")]
|
||||
|
||||
def handle_serve_lokinet(modified_since, respond):
|
||||
l = BinHolder('lokinet.zip')
|
||||
return l.serve(modified_since, respond)
|
||||
|
||||
|
||||
def fetch_lokinet(j, ref="staging", name="build:linux"):
|
||||
holder = BinHolder("lokinet.zip")
|
||||
if 'builds' not in j:
|
||||
return False
|
||||
selected = None
|
||||
attrs = dict()
|
||||
if 'object_attributes' in j:
|
||||
attrs = j['object_attributes']
|
||||
if 'ref' not in attrs or attrs["ref"] != ref:
|
||||
return True
|
||||
|
||||
for build in j['builds']:
|
||||
if 'name' not in build or build['name'] != name:
|
||||
continue
|
||||
if 'status' not in build or build['status'] != 'success':
|
||||
continue
|
||||
if 'finished_at' not in build or build['finished_at'] is None:
|
||||
continue
|
||||
if holder.is_new(build['finished_at']):
|
||||
if selected is None or _compare_dates(build["finished_at"], selected["finished_at"]):
|
||||
selected = build
|
||||
if selected and 'id' in selected:
|
||||
url = 'https://gitlab.com/lokiproject/loki-network/-/jobs/{}/artifacts/download'.format(selected['id'])
|
||||
r = requests.get(url)
|
||||
if r.status_code == 200:
|
||||
holder.put(r)
|
||||
return True
|
||||
|
||||
#if 'artifacts_file' not in selected:
|
||||
# return False
|
||||
#f = selected["artifacts_file"]
|
||||
#return True
|
||||
|
||||
def handle_webhook(j, token, event, respond):
|
||||
"""
|
||||
handle CI webhook
|
||||
"""
|
||||
t = TokenHolder()
|
||||
if not t.verify(token):
|
||||
respond("403 Forbidden", [])
|
||||
return []
|
||||
event = event.lower()
|
||||
if event == 'pipeline hook':
|
||||
if fetch_lokinet(j):
|
||||
respond("200 OK", [])
|
||||
return []
|
||||
else:
|
||||
respond("500 Internal Server Error", [])
|
||||
return []
|
||||
else:
|
||||
respond("404 Not Found", [])
|
||||
return []
|
||||
|
||||
|
||||
def app(environ, start_response):
    """WSGI entry point for the bootserv service.

    Routes:
      PUT  (any path)       -- RC upload from a running node
      POST /                -- GitLab CI webhook
      GET  /bootstrap.signed -- serve a random stored RC
      GET  /ping            -- liveness check
      GET  /lokinet.zip     -- serve the cached build artifact
      GET  /                -- banner
    """
    request_body_size = int(environ.get("CONTENT_LENGTH", 0))
    method = environ.get("REQUEST_METHOD")
    if method.upper() == "PUT" and request_body_size > 0:
        # node submitting its signed RC
        rcbody = environ.get("wsgi.input").read(request_body_size)
        return handle_rc_upload(rcbody, start_response)
    elif method.upper() == "POST":
        if environ.get("PATH_INFO") == "/":
            # NOTE(review): malformed JSON raises here and surfaces as a 500
            # from the WSGI server -- confirm that is acceptable
            j = json.loads(environ.get("wsgi.input").read(request_body_size))
            token = environ.get("HTTP_X_GITLAB_TOKEN")
            return handle_webhook(j, token, environ.get("HTTP_X_GITLAB_EVENT"), start_response)
        else:
            return response("404 Not Found", 'bad url', start_response)
    elif method.upper() == "GET":
        if environ.get("PATH_INFO") == "/bootstrap.signed":
            # serve_random_rc is defined elsewhere in this file; returns
            # bytes or None when no RCs are stored
            resp = serve_random_rc()
            if resp is not None:
                start_response('200 OK', [("Content-Type", "application/octet-stream")])
                return [resp]
            else:
                return response('404 Not Found', 'no RCs', start_response)
        elif environ.get("PATH_INFO") == "/ping":
            return response('200 OK', 'pong', start_response)
        elif environ.get("PATH_INFO") == "/lokinet.zip":
            return handle_serve_lokinet(environ.get("HTTP_IF_MODIFIED_SINCE"),start_response)
        elif environ.get("PATH_INFO") == "/":
            return response("200 OK", "lokinet bootserv", start_response)
        else:
            return response('404 Not Found', 'Not found', start_response)
    else:
        return response('405 Method Not Allowed', 'method not allowed', start_response)
||||
def main():
    """
    run as a cron job: prune stale RCs from the on-disk store

    RCHolder is defined elsewhere in this file.
    """
    h = RCHolder()
    h.prune()


if __name__ == '__main__':
    main()
|
@ -1,224 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# lokinet runtime wrapper
|
||||
#
|
||||
|
||||
from ctypes import *
|
||||
import configparser
|
||||
import signal
|
||||
import time
|
||||
import threading
|
||||
import os
|
||||
import sys
|
||||
import requests
|
||||
|
||||
from pylokinet import rc
|
||||
|
||||
lib_file = os.path.join(os.path.realpath('.'), 'liblokinet-shared.so')
|
||||
|
||||
|
||||
def log(msg):
    """Write a prefixed diagnostic line to stderr, flushing immediately."""
    line = "lokinet: {}\n".format(msg)
    sys.stderr.write(line)
    sys.stderr.flush()
||||
class LokiNET(threading.Thread):
    """Runs the lokinet daemon in-process via ctypes bindings to
    liblokinet-shared.so, on its own thread."""

    # handle to the loaded shared library (ctypes CDLL), set in configure()
    lib = None
    # opaque llarp_main context pointer returned by llarp_main_init
    ctx = 0
    # set True by inform_fail() when the daemon exits with an error
    failed = False
    # True while the daemon thread is (believed to be) running
    up = False

    # passed to llarp_ensure_config: generate a router (not client) config
    asRouter = True

    def configure(self, lib, conf, ip=None, port=None, ifname=None, seedfile=None, lokid_host=None, lokid_port=None):
        """Load the shared library, (re)write the daemon config file and
        initialize the llarp context.

        :param lib: path to liblokinet-shared.so
        :param conf: path to the daemon.ini to generate/patch
        :param ip: optional public address to advertise
        :param port: optional public port to advertise
        :param ifname: optional interface name to bind (together with port)
        :param seedfile: optional service-node seed file (requires lokid_*)
        :param lokid_host: optional lokid jsonrpc host
        :param lokid_port: optional lokid jsonrpc port
        :return: True when setup succeeded, False otherwise
        """
        log("configure lib={} conf={}".format(lib, conf))
        if not os.path.exists(os.path.dirname(conf)):
            os.mkdir(os.path.dirname(conf))
        try:
            self.lib = CDLL(lib)
        except OSError as ex:
            log("failed to load library: {}".format(ex))
            return False
        # llarp_ensure_config creates a default config if needed; nonzero
        # return indicates success here
        if self.lib.llarp_ensure_config(conf.encode('utf-8'), os.path.dirname(conf).encode('utf-8'), True, self.asRouter):
            config = configparser.ConfigParser()
            config.read(conf)
            log('overwrite ip="{}" port="{}" ifname="{}" seedfile="{}" lokid=("{}", "{}")'.format(
                ip, port, ifname, seedfile, lokid_host, lokid_port))
            # service-node mode: wire up the lokid rpc + seed file
            if seedfile and lokid_host and lokid_port:
                if not os.path.exists(seedfile):
                    log('cannot access service node seed at "{}"'.format(seedfile))
                    return False
                config['lokid'] = {
                    'service-node-seed': seedfile,
                    'enabled': "true",
                    'jsonrpc': "{}:{}".format(lokid_host, lokid_port)
                }
            if ip:
                config['router']['public-address'] = '{}'.format(ip)
            if port:
                config['router']['public-port'] = '{}'.format(port)
            if ifname and port:
                config['bind'] = {
                    ifname: '{}'.format(port)
                }
            # persist the patched config before handing it to the daemon
            with open(conf, "w") as f:
                config.write(f)
            self.ctx = self.lib.llarp_main_init(conf.encode('utf-8'))
        else:
            return False
        # llarp_main_setup returns 0 on success
        return self.lib.llarp_main_setup(self.ctx, False) == 0

    def inform_fail(self):
        """
        inform that lokinet crashed
        """
        self.failed = True
        self._inform()

    def inform_up(self):
        # mark the daemon as running and notify any waiter
        self.up = True
        self._inform()

    def _inform(self):
        """
        inform waiter (currently a no-op; the event-based implementation
        was removed -- see the commented _up references below)
        """

    def wait_for_up(self, timeout):
        """
        wait for lokinet to go up for :timeout: seconds
        :return True if we are up and running otherwise False:

        NOTE(review): currently a no-op that returns None; callers poll
        self.up instead.
        """
        # return self._up.wait(timeout)

    def signal(self, sig):
        # forward a unix signal number into the daemon's main loop
        if self.ctx and self.lib:
            self.lib.llarp_main_signal(self.ctx, int(sig))

    def run(self):
        # thread body: blocks in llarp_main_run until the daemon exits
        # self._up.acquire()
        self.up = True
        code = self.lib.llarp_main_run(self.ctx)
        log("llarp_main_run exited with status {}".format(code))
        if code:
            self.inform_fail()
        self.up = False
        # self._up.release()

    def close(self):
        # release the llarp context; safe to call when configure() failed
        if self.lib and self.ctx:
            self.lib.llarp_main_free(self.ctx)
||||
|
||||
def getconf(name, fallback=None):
    """Look up an environment variable, returning ``fallback`` when unset.

    Replaces the ``name in os.environ and os.environ[name] or fallback``
    idiom, which wrongly returned the fallback when the variable was set
    to an empty (falsy) string.

    :param name: environment variable name
    :param fallback: value to return when the variable is absent
    :return: the variable's value, or fallback
    """
    return os.environ.get(name, fallback)
||||
def run_main(args):
    """Supervise a lokinet instance configured entirely from environment
    variables: bootstrap an RC over HTTP, configure and start the daemon,
    submit our own RC to the bootserv, then ping a callback URL until exit.

    :param args: CLI arguments (currently unused)
    """
    seedfile = getconf("LOKI_SEED_FILE")
    if seedfile is None:
        print("LOKI_SEED_FILE was not set")
        return

    lokid_host = getconf("LOKI_RPC_HOST", "127.0.0.1")
    lokid_port = getconf("LOKI_RPC_PORT", "22023")

    root = getconf("LOKINET_ROOT")
    if root is None:
        print("LOKINET_ROOT was not set")
        return

    rc_callback = getconf("LOKINET_SUBMIT_URL")
    if rc_callback is None:
        print("LOKINET_SUBMIT_URL was not set")
        return

    # NOTE(review): unlike the settings above, a missing bootstrap URL only
    # warns and does not return -- requests.get(None) below would then fail
    # and be caught by the bootstrap try/except. Confirm this is intended.
    bootstrap = getconf("LOKINET_BOOTSTRAP_URL")
    if bootstrap is None:
        print("LOKINET_BOOTSTRAP_URL was not set")

    # fall back to a bare soname so the system loader can resolve it
    lib = getconf("LOKINET_LIB", lib_file)
    if not os.path.exists(lib):
        lib = "liblokinet-shared.so"
    timeout = int(getconf("LOKINET_TIMEOUT", "5"))
    ping_interval = int(getconf("LOKINET_PING_INTERVAL", "60"))
    ping_callback = getconf("LOKINET_PING_URL")
    ip = getconf("LOKINET_IP")
    port = getconf("LOKINET_PORT")
    ifname = getconf("LOKINET_IFNAME")
    if ping_callback is None:
        print("LOKINET_PING_URL was not set")
        return
    conf = os.path.join(root, "daemon.ini")
    log("going up")
    loki = LokiNET()
    log("bootstrapping...")
    try:
        # fetch a bootstrap RC; 404 means the bootserv has none yet, which
        # is expected for the very first (seed) node
        r = requests.get(bootstrap)
        if r.status_code == 404:
            log("bootstrap gave no RCs, we are probably the seed node")
        elif r.status_code != 200:
            raise Exception("http {}".format(r.status_code))
        else:
            data = r.content
            if rc.validate(data):
                log("valid RC obtained")
                with open(os.path.join(root, "bootstrap.signed"), "wb") as f:
                    f.write(data)
            else:
                raise Exception("invalid RC")
    except Exception as ex:
        log("failed to bootstrap: {}".format(ex))
        loki.close()
        return
    if loki.configure(lib, conf, ip, port, ifname, seedfile, lokid_host, lokid_port):
        log("configured")

        loki.start()
        try:
            # give the daemon `timeout` seconds to come up before we judge it
            log("waiting for spawn")
            while timeout > 0:
                time.sleep(1)
                if loki.failed:
                    log("failed")
                    break
                log("waiting {}".format(timeout))
                timeout -= 1
            if loki.up:
                # submit our own signed RC to the bootserv
                log("submitting rc")
                try:
                    with open(os.path.join(root, 'self.signed'), 'rb') as f:
                        # TODO(review): "octect-stream" is a typo for
                        # "octet-stream" in the content-type header
                        r = requests.put(rc_callback, data=f.read(), headers={
                            "content-type": "application/octect-stream"})
                    log('submit rc reply: HTTP {}'.format(r.status_code))
                except Exception as ex:
                    log("failed to submit rc: {}".format(ex))
                    loki.signal(signal.SIGINT)
                    time.sleep(2)
                else:
                    # periodic liveness ping for as long as the daemon runs
                    while loki.up:
                        time.sleep(ping_interval)
                        try:
                            r = requests.get(ping_callback)
                            log("ping reply: HTTP {}".format(r.status_code))
                        except Exception as ex:
                            log("failed to submit ping: {}".format(ex))
            else:
                log("failed to go up")
                loki.signal(signal.SIGINT)
        except KeyboardInterrupt:
            # graceful shutdown on ^C: signal the daemon and give it time
            loki.signal(signal.SIGINT)
            time.sleep(2)
        finally:
            loki.close()
    else:
        loki.close()
||||
|
||||
|
||||
def main():
    # entry point: forward CLI args (minus argv[0]) to run_main
    run_main(sys.argv[1:])


if __name__ == "__main__":
    main()
|
|
@ -1,31 +0,0 @@
|
|||
from pylokinet import bencode
|
||||
import pysodium
|
||||
import binascii
|
||||
import time
|
||||
|
||||
def _expired(ts, lifetime=84600000):
|
||||
"""
|
||||
return True if a timestamp is considered expired
|
||||
lifetime is default 23.5 hours
|
||||
"""
|
||||
return (int(time.time()) * 1000) - ts >= lifetime
|
||||
|
||||
def validate(data):
    """Validate a signed router contact (RC).

    The detached ed25519 signature in field b'z' covers the RC re-encoded
    with that field zeroed out; the signing key is field b'k' and the
    millisecond timestamp is field b't'.

    :param data: bencoded RC bytes
    :return: True when the signature verifies and the RC is not expired
    """
    rc = bencode.bdecode(data)
    # require signature, public key AND timestamp up front; previously a
    # missing b't' raised KeyError below instead of returning False
    if b'z' not in rc or b'k' not in rc or b't' not in rc:
        return False
    sig = rc[b'z']
    # the signature is computed over the RC with the sig field zeroed
    rc[b'z'] = b'\x00' * 64
    buf = bencode.bencode(rc)
    try:
        k = rc[b'k']
        pysodium.crypto_sign_verify_detached(sig, buf, k)
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        return False
    else:
        return not _expired(rc[b't'])
|
||||
def get_pubkey(data):
    """Return the RC's public key (field b'k') as a hex string.

    :param data: bencoded RC bytes
    :return: lowercase hex string, or None (implicitly) when the decoded
             RC has no b'k' field
    """
    rc = bencode.bdecode(data)
    if b'k' in rc:
        return binascii.hexlify(rc[b'k']).decode('ascii')
|
|
@ -1,27 +0,0 @@
|
|||
# pylokinet
|
||||
|
||||
lokinet bindings and tooling for Python 3
|
||||
|
||||
# python3 setup.py install
|
||||
|
||||
## bootserv
|
||||
|
||||
bootserv is a bootstrap server for accepting and serving RCs
|
||||
|
||||
$ gunicorn -b 0.0.0.0:8000 pylokinet.bootserv:app
|
||||
|
||||
## pylokinet instance
|
||||
|
||||
obtain `liblokinet-shared.so` from a lokinet build
|
||||
|
||||
run (root):
|
||||
|
||||
# export LOKINET_ROOT=/tmp/lokinet-instance/
|
||||
# export LOKINET_LIB=/path/to/liblokinet-shared.so
|
||||
# export LOKINET_BOOTSTRAP_URL=http://bootserv.ip.address.here:8000/bootstrap.signed
|
||||
# export LOKINET_PING_URL=http://bootserv.ip.address.here:8000/ping
|
||||
# export LOKINET_SUBMIT_URL=http://bootserv.ip.address.here:8000/
|
||||
# export LOKINET_IP=public.ip.goes.here
|
||||
# export LOKINET_PORT=1090
|
||||
# export LOKINET_IFNAME=eth0
|
||||
# python3 -m pylokinet
|
|
@ -1,14 +0,0 @@
|
|||
from setuptools import setup, find_packages


# Packaging metadata for the pylokinet python bindings; runtime deps are
# declared here (pysodium for signature checks, requests for HTTP,
# python-dateutil for CI timestamp comparison).
setup(
    name="pylokinet",
    version="0.0.1",
    license="ZLIB",
    author="jeff",
    author_email="jeff@i2p.rocks",
    description="lokinet python bindings",
    url="https://github.com/loki-project/loki-network",
    install_requires=["pysodium", "requests", "python-dateutil"],
    packages=find_packages())
|
|
@ -1,2 +0,0 @@
|
|||
__pycache__
|
||||
*.private
|
|
@ -1,112 +0,0 @@
|
|||
#
|
||||
# super freaking dead simple wicked awesome bencode library
|
||||
#
|
||||
from io import BytesIO
|
||||
|
||||
class BCodec:
    """Minimal bencode encoder/decoder over a binary file-like object.

    Python strings are encoded as byte strings using ``encoding``;
    decoded byte strings (including dict keys) come back as ``bytes``.
    """
    encoding = 'utf-8'

    def __init__(self, fd):
        self._fd = fd

    def _write_bytestring(self, bs):
        # format: <decimal length>:<raw bytes>
        self._fd.write('{}:'.format(len(bs)).encode('ascii'))
        self._fd.write(bs)

    def _write_list(self, l):
        self._fd.write(b'l')
        for item in l:
            self.encode(item)
        self._fd.write(b'e')

    def _write_dict(self, d):
        # bencode requires dictionary keys in sorted order
        self._fd.write(b'd')
        keys = list(d.keys())
        keys.sort()
        for k in keys:
            if isinstance(k, str):
                self._write_bytestring(k.encode(self.encoding))
            elif isinstance(k, bytes):
                self._write_bytestring(k)
            else:
                self._write_bytestring('{}'.format(k).encode(self.encoding))
            self.encode(d[k])
        self._fd.write(b'e')

    def _write_int(self, i):
        self._fd.write('i{}e'.format(i).encode(self.encoding))

    def encode(self, obj):
        """Serialize obj (dict, list, int, str, bytes, or any object with
        a ``bencode`` method) to the underlying stream.

        :raises ValueError: for unsupported object types
        """
        if isinstance(obj, dict):
            self._write_dict(obj)
        elif isinstance(obj, list):
            self._write_list(obj)
        elif isinstance(obj, int):
            self._write_int(obj)
        elif isinstance(obj, str):
            self._write_bytestring(obj.encode(self.encoding))
        elif isinstance(obj, bytes):
            self._write_bytestring(obj)
        elif hasattr(obj, 'bencode'):
            # bug fix: was hasattr(obj, bencode) -- a reference to an
            # undefined name which raised NameError; hasattr takes the
            # attribute name as a string
            obj.bencode(self._fd)
        else:
            raise ValueError("invalid object type")

    def _readuntil(self, delim):
        # accumulate single bytes until delim; bug fix: raise on EOF
        # instead of spinning forever on a truncated stream (read(1)
        # returns b'' at EOF, which never equals delim)
        b = bytes()
        while True:
            ch = self._fd.read(1)
            if ch == delim:
                return b
            if not ch:
                raise ValueError("unexpected end of stream")
            b += ch

    def _decode_list(self):
        l = list()
        while True:
            b = self._fd.read(1)
            if b == b'e':
                return l
            l.append(self._decode(b))

    def _decode_dict(self):
        d = dict()
        while True:
            ch = self._fd.read(1)
            if ch == b'e':
                return d
            # keys are always byte strings
            k = self._decode_bytestring(ch)
            d[k] = self.decode()

    def _decode_int(self):
        return int(self._readuntil(b'e'), 10)

    def _decode_bytestring(self, ch):
        # ch already holds the first digit of the length prefix
        ch += self._readuntil(b':')
        l = int(ch, base=10)
        return self._fd.read(l)

    def _decode(self, ch):
        if ch == b'd':
            return self._decode_dict()
        elif ch == b'l':
            return self._decode_list()
        elif ch == b'i':
            return self._decode_int()
        elif ch in [b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9']:
            return self._decode_bytestring(ch)
        else:
            raise ValueError(ch)

    def decode(self):
        """Deserialize and return the next bencoded value from the stream."""
        return self._decode(self._fd.read(1))
|
||||
def bencode(obj):
    """Encode ``obj`` as bencoded bytes.

    :param obj: any type supported by BCodec.encode
    :return: the bencoded byte string
    """
    buf = BytesIO()
    b = BCodec(buf)
    b.encode(obj)
    # bug fix: BytesIO has no .bytes() method (that raised AttributeError);
    # getvalue() returns the accumulated contents
    return buf.getvalue()
|
||||
def bdecode(bytestring):
    """Decode a bencoded byte string into python objects.

    Bug fix: the old implementation wrote ``bytestring`` into an empty
    BytesIO and decoded without rewinding, so the read cursor sat at EOF
    and every call raised ValueError. Constructing BytesIO(bytestring)
    leaves the cursor at offset 0.

    :param bytestring: bencoded bytes
    :return: the decoded value
    """
    return BCodec(BytesIO(bytestring)).decode()
|
|
@ -1,138 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
import bencode
|
||||
import sys
|
||||
import libnacl
|
||||
import struct
|
||||
from io import BytesIO
|
||||
import time
|
||||
from multiprocessing import Process, Array, Value
|
||||
|
||||
|
||||
def print_help():
    """Print usage text and return an exit code of 1."""
    usage = 'usage: {} keyfile.private prefix numthreads'
    print(usage.format(sys.argv[0]))
    return 1
|
||||
|
||||
# z-base-32 alphabet (Zooko's human-oriented base32 variant) used for
# encoding .loki addresses; index n maps the 5-bit value n to a character.
_zalpha = ['y', 'b', 'n', 'd', 'r', 'f', 'g', '8',
           'e', 'j', 'k', 'm', 'c', 'p', 'q', 'x',
           'o', 't', '1', 'u', 'w', 'i', 's', 'z',
           'a', '3', '4', '5', 'h', '7', '6', '9']
|
||||
def zb32_encode(buf):
    """Encode ``buf`` as a z-base-32 string.

    Repacks the input into 5-bit groups (most significant bits first) and
    maps each group through _zalpha; the final group is zero-padded on the
    right. Assumes buf is non-empty -- an empty buffer raises IndexError
    on the initial buf[idx].
    """
    s = str()
    bits = 0
    l = len(buf)
    idx = 0
    # tmp is seeded with the first byte, but since bits == 0 those bits are
    # never counted and the & 0x1f mask below discards them, so the output
    # is unaffected by this apparent double-read of buf[0]
    tmp = buf[idx]
    while bits > 0 or idx < l:
        if bits < 5:
            if idx < l:
                # pull the next input byte into the low end of tmp
                tmp <<= 8
                tmp |= buf[idx] & 0xff
                idx += 1
                bits += 8
            else:
                # input exhausted: left-shift to pad the tail group with
                # zero bits up to a full 5
                tmp <<= 5 - bits
                bits = 5
        bits -= 5
        # emit the top 5 counted bits as one character
        s += _zalpha[(tmp >> bits) & 0x1f]
    return s
|
||||
|
||||
def _gen_si(keys):
|
||||
e = keys[b'e'][32:]
|
||||
s = keys[b's'][32:]
|
||||
v = keys[b'v']
|
||||
return {'e': e, 's': s, 'v': v}
|
||||
|
||||
|
||||
class AddrGen:
    """Brute-forces a .loki vanity address prefix across multiple worker
    processes by varying the 128-bit nonce field 'x' of the service info."""

    def __init__(self, threads, keys, prefix):
        # number of worker processes (also the per-process counter stride)
        self._inc = threads
        # decoded keyfile dict (bytes keys)
        self._keys = keys
        # shared hash counter for the H/s display
        self._c = Value('i')
        # shared state: [0]=hi, [1]=lo of the winning nonce, [2]=found flag
        # NOTE(review): 'i' is a 32-bit C int but hi/lo are up to 64 bits --
        # potential truncation, confirm against the keyfile format
        self.sync = Array('i', 3)
        self._procs = []
        self.prefix = prefix

    def runit(self):
        """Validate the prefix, spawn the workers, and block until a match
        is found.

        :return: (packed nonce bytes, address string) or (None, None) when
                 the prefix contains characters outside the z-base-32 set
        """
        for ch in self.prefix:
            if ch not in _zalpha:
                print("invalid prefix, {} not a valid character".format(ch))
                return None, None
        print("find ^{}.loki".format(self.prefix))
        i = self._inc
        while i > 0:
            # each worker starts from a random (lo, hi) nonce pair
            p = Process(target=self._gen_addr_tick, args=(self.prefix, abs(
                libnacl.randombytes_random()), abs(libnacl.randombytes_random()), _gen_si(self._keys)))
            p.start()
            self._procs.append(p)
            i -= 1
        return self._runner()

    def _gen_addr_tick(self, prefix, lo, hi, si):
        """Worker loop: hash candidate service infos until some process
        sets the shared found flag."""
        print(prefix)
        fd = BytesIO()
        addr = ''
        enc = bencode.BCodec(fd)
        while self.sync[2] == 0:
            # pack the candidate nonce and re-encode the service info
            si['x'] = struct.pack('>QQ', lo, hi)
            fd.seek(0, 0)
            enc.encode(si)
            pub = bytes(fd.getbuffer())
            # the address is the z-base-32 of the blake2b hash of the info
            addr = zb32_encode(libnacl.crypto_generichash(pub))
            if addr.startswith(prefix):
                # publish the winning nonce and signal everyone to stop
                self.sync[2] = 1
                self.sync[0] = hi
                self.sync[1] = lo
                return
            # stride by the worker count so processes do not test the same
            # nonces; carry into lo on wrap
            hi += self._inc
            if hi == 0:
                lo += 1
            self._c.value += 1

    def _print_stats(self):
        # report and reset the shared hash counter (approximate H/s)
        print('{} H/s'.format(self._c.value))
        self._c.value = 0

    def _joinall(self):
        for p in self._procs:
            p.join()

    def _runner(self):
        """Poll for completion, then recompute the winning address from the
        published (hi, lo) nonce and return it."""
        while self.sync[2] == 0:
            time.sleep(1)
            self._print_stats()
        self._joinall()
        fd = BytesIO()
        enc = bencode.BCodec(fd)
        hi = self.sync[0]
        lo = self.sync[1]
        si = _gen_si(self._keys)
        si['x'] = struct.pack('>QQ', lo, hi)
        enc.encode(si)
        pub = bytes(fd.getbuffer())
        addr = zb32_encode(libnacl.crypto_generichash(pub))
        return si['x'], addr
||||
|
||||
def main(args):
    """Vanity-address CLI: args are [keyfile, prefix, numthreads].

    Reads the bencoded keyfile, brute-forces a nonce so the derived address
    starts with the prefix, then writes the nonce back into the keyfile.
    """
    if len(args) != 3:
        return print_help()
    keys = None
    with open(args[0], 'rb') as fd:
        dec = bencode.BCodec(fd)
        keys = dec.decode()
    runner = AddrGen(int(args[2]), keys, args[1])
    # runit returns (None, None) when the prefix is invalid
    keys[b'x'], addr = runner.runit()
    if addr:
        print("found {}.loki".format(addr))
        # persist the winning nonce back into the keyfile
        with open(args[0], 'wb') as fd:
            enc = bencode.BCodec(fd)
            enc.encode(keys)
|
||||
|
||||
# CLI entry point: keyfile, prefix, thread count (see print_help)
if __name__ == '__main__':
    main(sys.argv[1:])
|
|
@ -1,10 +0,0 @@
|
|||
# lokinet vanity address generator
|
||||
|
||||
installing deps:
|
||||
|
||||
sudo apt install libsodium-dev
|
||||
pip3 install --user -r requirements.txt
|
||||
|
||||
to generate a nonce with a prefix `^7oki` using 8 cpu threads:
|
||||
|
||||
python3 lokinet-vanity.py keyfile.private 7oki 8
|
|
@ -1 +0,0 @@
|
|||
libnacl
|
|
@ -1,141 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import configparser
|
||||
import sys
|
||||
import os
|
||||
|
||||
from xml.etree import ElementTree as etree
|
||||
|
||||
|
||||
def getSetting(s, name, fallback):
    """Return ``s[name]`` when present, else ``fallback``.

    Replaces the ``name in s and s[name] or fallback`` idiom, which
    wrongly returned the fallback for present-but-falsy values
    (0, '', None).
    """
    return s[name] if name in s else fallback
|
||||
|
||||
# Root of the shadow simulator installation; overridable via SHADOW_ROOT,
# defaulting to ~/.shadow.
shadowRoot = getSetting(os.environ, "SHADOW_ROOT",
                        os.path.join(os.environ['HOME'], '.shadow'))

# Shared-object name of the lokinet shadow plugin loaded by the simulator.
libpath = 'libshadow-plugin-lokinet.so'
||||
|
||||
|
||||
def nodeconf(conf, baseDir, name, ifname=None, port=None):
    """Fill ``conf`` (a ConfigParser) in place with the baseline sections
    for one lokinet test node.

    :param conf: config object to populate
    :param baseDir: directory holding per-node key and contact files
    :param name: node name used to derive the file names
    :param ifname: optional interface to bind
    :param port: optional port to bind on ifname (only used with ifname)
    """
    contact = os.path.join(baseDir, '{}.signed'.format(name))
    ident_key = os.path.join(baseDir, '{}-ident.key'.format(name))
    transport_key = os.path.join(baseDir, '{}-transport.key'.format(name))
    conf['netdb'] = {'dir': 'tmp-nodes'}
    conf['router'] = {
        'contact-file': contact,
        'ident-privkey': ident_key,
        'transport-privkey': transport_key,
    }
    if ifname and port:
        conf['bind'] = {ifname: port}
    conf['connect'] = {}
|
||||
|
||||
def addPeer(conf, baseDir, peer):
    """Register ``peer`` in conf's [connect] section, pointing at the
    peer's signed contact file under baseDir."""
    rc_path = os.path.join(baseDir, '{}.signed'.format(peer))
    conf['connect'][peer] = rc_path
|
||||
|
||||
def createNode(pluginName, root, peer, life=600):
    """Append a shadow <node> element (with its child <process>) to root.

    :param pluginName: shadow plugin id to run
    :param root: parent XML element
    :param peer: peer dict with 'name' and 'configfile' keys
    :param life: process lifetime in seconds
    """
    node = etree.SubElement(root, 'node')
    node.set('id', peer['name'])
    node.set('interfacebuffer', str(1024 * 1024 * 100))
    proc = etree.SubElement(node, 'process')
    proc.set('plugin', pluginName)
    proc.set('time', str(life))
    proc.set('arguments', peer['configfile'])
|
||||
|
||||
def makeBase(settings, name, id):
    """Build the common peer-description dict shared by client and
    service-node generators: id, name, derived file paths and an empty
    ConfigParser."""
    base = getSetting(settings, 'baseDir', 'tmp')
    return {
        'id': id,
        'name': name,
        'contact-file': os.path.join(base, '{}.signed'.format(name)),
        'configfile': os.path.join(base, '{}.ini'.format(name)),
        'config': configparser.ConfigParser()
    }
|
||||
|
||||
def makeClient(settings, name, id):
    """Create a client-node description: base config plus a null network
    section tagged for the test."""
    peer = makeBase(settings, name, id)
    nodeconf(peer['config'], getSetting(settings, 'baseDir', 'tmp'), name)
    peer['config']['network'] = {
        'type': 'null',
        'tag': 'test',
        'prefetch-tag': 'test'
    }
    return peer
||||
|
||||
|
||||
def makeSVCNode(settings, name, id, port):
    """Create a service-node description bound to eth0 on ``port`` with a
    null network section."""
    peer = makeBase(settings, name, id)
    basedir = getSetting(settings, 'baseDir', 'tmp')
    nodeconf(peer['config'], basedir, name, 'eth0', port)
    peer['config']['network'] = {'type': 'null'}
    return peer
|
||||
|
||||
|
||||
def genconf(settings, outf):
    """Generate a shadow simulator XML config plus per-node ini files.

    Side effects: creates baseDir if missing and writes one ini file per
    node under it. The XML document is written to ``outf``.

    :param settings: dict of generator settings (see keys read below)
    :param outf: writable text stream for the rendered XML
    """
    root = etree.Element('shadow')
    root.attrib["environment"] = 'LLARP_SHADOW=1'
    topology = etree.SubElement(root, 'topology')
    topology.attrib['path'] = getSetting(settings, 'topology', os.path.join(
        shadowRoot, 'share', 'topology.graphml.xml'))

    pluginName = getSetting(settings, 'name', 'lokinet-shared')

    # hard stop for the whole simulation
    kill = etree.SubElement(root, 'kill')
    kill.attrib['time'] = getSetting(settings, 'runFor', '600')

    baseDir = getSetting(settings, 'baseDir',
                         os.path.join('/tmp', 'lokinet-shadow'))

    if not os.path.exists(baseDir):
        os.mkdir(baseDir)

    plugin = etree.SubElement(root, "plugin")
    plugin.attrib['id'] = pluginName
    plugin.attrib['path'] = libpath
    basePort = getSetting(settings, 'svc-base-port', 19000)
    svcNodeCount = getSetting(settings, 'service-nodes', 80)
    peers = list()
    # create the service nodes, one port each
    # NOTE(review): the first node gets basePort + 1, not basePort -- the
    # base port itself is never used; confirm this off-by-one is intended
    for nodeid in range(svcNodeCount):
        peers.append(makeSVCNode(
            settings, 'svc-node-{}'.format(nodeid), str(nodeid), basePort + 1))
        basePort += 1

    # make all service nodes know each other
    for peer in peers:
        for nodeid in range(svcNodeCount):
            if str(nodeid) != peer['id']:
                addPeer(peer['config'], baseDir, 'svc-node-{}'.format(nodeid))

    # add client nodes, each connecting to a rotating subset of svc nodes
    for nodeid in range(getSetting(settings, 'client-nodes', 200)):
        peer = makeClient(
            settings, 'client-node-{}'.format(nodeid), str(nodeid))
        peers.append(peer)
        for p in range(getSetting(settings, 'client-connect-to', 10)):
            addPeer(peer['config'], baseDir,
                    'svc-node-{}'.format((p + nodeid) % svcNodeCount))

    # generate xml nodes and write each peer's ini file
    for peer in peers:
        createNode(pluginName, root, peer)

        with open(peer['configfile'], 'w') as f:
            peer['config'].write(f)

    # render the assembled document
    outf.write(etree.tostring(root).decode('utf-8'))
|
||||
|
||||
if __name__ == '__main__':
    # default run: configs under /tmp/lokinet-shadow, 6000-second simulation,
    # XML written to the path given as the first CLI argument
    settings = {
        'baseDir': os.path.join("/tmp", "lokinet-shadow"),
        'topology': os.path.join(shadowRoot, 'share', 'topology.graphml.xml'),
        'runFor': '{}'.format(60 * 10 * 10)
    }
    with open(sys.argv[1], 'w') as f:
        genconf(settings, f)
|
|
@ -1 +0,0 @@
|
|||
v/
|
|
@ -1,157 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# this script generates supervisord configs for running a test network on loopback
|
||||
#
|
||||
|
||||
|
||||
from argparse import ArgumentParser as AP
|
||||
from configparser import ConfigParser as CP
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def svcNodeName(id):
    """Return the zero-padded directory/nickname for service node ``id``."""
    return 'svc-node-%03d' % id
|
||||
|
||||
def clientNodeName(id):
    """Return the zero-padded directory/nickname for client node ``id``."""
    return 'client-node-%03d' % id
|
||||
|
||||
def main():
    """Generate per-node lokinet ini files plus a supervisord config for a
    loopback testnet of ``--svc`` service nodes and ``--clients`` clients.

    Side effects: creates one directory per node under --dir and writes the
    supervisord config to --out.
    """
    ap = AP()
    ap.add_argument('--valgrind', type=bool, default=False)
    ap.add_argument('--dir', type=str, default='testnet_tmp')
    ap.add_argument('--svc', type=int, default=20,
                    help='number of service nodes')
    ap.add_argument('--baseport', type=int, default=19000)
    ap.add_argument('--clients', type=int, default=200,
                    help='number of client nodes')
    ap.add_argument('--bin', type=str, required=True)
    ap.add_argument('--out', type=str, required=True)
    ap.add_argument('--connect', type=int, default=10)
    ap.add_argument('--ip', type=str, default=None)
    ap.add_argument('--ifname', type=str, default='lo')
    ap.add_argument('--netid', type=str, default=None)
    ap.add_argument('--loglevel', type=str, default='debug')
    args = ap.parse_args()

    if args.valgrind:
        exe = 'valgrind {}'.format(args.bin)
    else:
        exe = '{} -v'.format(args.bin)
    basedir = os.path.abspath(args.dir)

    # one config directory per service node
    for nodeid in range(args.svc):
        config = CP()
        config['router'] = {
            'data-dir': '.',
            'net-threads': '1',
            'worker-threads': '4',
            'nickname': svcNodeName(nodeid),
            'min-connections': "{}".format(args.connect)
        }
        if args.netid:
            config['router']['netid'] = args.netid

        if args.ip:
            config['router']['public-ip'] = args.ip
            config['router']['public-port'] = str(args.baseport + nodeid)

        config['bind'] = {
            args.ifname: str(args.baseport + nodeid)
        }
        # note: the logging section was previously assigned twice with the
        # same value; a single assignment suffices
        config["logging"] = {
            "level": args.loglevel
        }
        config['netdb'] = {
            'dir': 'netdb'
        }
        config['network'] = {
            'type': 'null',
            'save-profiles': 'false'
        }
        config['api'] = {
            'enabled': 'false'
        }
        config['lokid'] = {
            'enabled': 'false',
        }
        d = os.path.join(args.dir, svcNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        fp = os.path.join(d, 'daemon.ini')
        with open(fp, 'w') as f:
            config.write(f)
            # node 0 seeds the network; every other node bootstraps from it.
            # bug fix: was `nodeid is not 0` -- identity comparison with an
            # int literal is implementation-dependent and a SyntaxWarning on
            # modern CPython; use a value comparison
            for n in [0]:
                if nodeid != 0:
                    f.write("[bootstrap]\nadd-node={}\n".format(os.path.join(basedir, svcNodeName(n), 'self.signed')))
                else:
                    f.write("[bootstrap]\nseed-node=true\n")

    # one config directory per client node
    for nodeid in range(args.clients):
        config = CP()

        config['router'] = {
            'data-dir': '.',
            'net-threads': '1',
            'worker-threads': '2',
            'nickname': clientNodeName(nodeid)
        }
        if args.netid:
            config['router']['netid'] = args.netid

        config["logging"] = {
            "level": args.loglevel
        }

        config['netdb'] = {
            'dir': 'netdb'
        }
        config['api'] = {
            'enabled': 'false'
        }
        config['network'] = {
            'type': 'null'
        }
        d = os.path.join(args.dir, clientNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        fp = os.path.join(d, 'client.ini')
        with open(fp, 'w') as f:
            config.write(f)
            # spread client bootstraps round-robin over the service nodes
            for n in [0]:
                otherID = (n + nodeid) % args.svc
                f.write("[bootstrap]\nadd-node={}\n".format(os.path.join(basedir, svcNodeName(otherID), 'self.signed')))

    # supervisord program sections for both node groups
    with open(args.out, 'w') as f:
        basedir = os.path.join(args.dir, 'svc-node-%(process_num)03d')
        f.write('''[program:svc-node]
directory = {}
command = {} -r {}/daemon.ini
autorestart=true
redirect_stderr=true
#stdout_logfile=/dev/fd/1
stdout_logfile={}/svc-node-%(process_num)03d-log.txt
stdout_logfile_maxbytes=0
process_name = svc-node-%(process_num)03d
numprocs = {}
'''.format(basedir, exe, basedir, args.dir, args.svc))
        basedir = os.path.join(args.dir, 'client-node-%(process_num)03d')
        f.write('''[program:Client-node]
directory = {}
command = bash -c "sleep 5 && {} {}/client.ini"
autorestart=true
redirect_stderr=true
#stdout_logfile=/dev/fd/1
stdout_logfile={}/client-node-%(process_num)03d-log.txt
stdout_logfile_maxbytes=0
process_name = client-node-%(process_num)03d
numprocs = {}
'''.format(basedir, exe, basedir, args.dir, args.clients))
        f.write('[supervisord]\ndirectory=.\n')
||||
|
||||
# allow running this generator directly from the command line
if __name__ == '__main__':
    main()
|
|
@ -1,23 +0,0 @@
|
|||
loopback testnet scripts
|
||||
|
||||
requirements:
|
||||
|
||||
* bash
|
||||
* python3
|
||||
* supervisord
|
||||
|
||||
|
||||
setup:
|
||||
|
||||
make a testnet-compatible lokinet build:
|
||||
|
||||
$ cmake -DWITH_TESTNET=ON -B build-testnet -S .
|
||||
$ make -C build-testnet lokinet
|
||||
|
||||
usage:
|
||||
|
||||
from root of repo run:
|
||||
|
||||
$ ./contrib/testnet/testnet.sh build-testnet/daemon/lokinet 20 200
|
||||
|
||||
this will spin up 20 service nodes and 200 clients
|
|
@ -1,2 +0,0 @@
|
|||
flask
|
||||
pynacl
|
|
@ -1,15 +0,0 @@
|
|||
#!/usr/bin/env bash
#
# Spin up a local lokinet testnet under supervisord.
# usage: testnet.sh path/to/lokinet num_svc num_clients

# require all three positional arguments
for arg in "$1" "$2" "$3" ; do
    test x = "x$arg" && echo "usage: $0 path/to/lokinet num_svc num_clients" && exit 1
done

# bug fix: quote all expansions so paths containing spaces (or glob
# characters) do not break word splitting
script_root="$(dirname "$(readlink -e "$0")")"
testnet_dir=/tmp/lokinet-testnet

mkdir -p "$testnet_dir"

set -x

"$script_root/genconf.py" --bin "$1" --netid=testnet --out="$testnet_dir/testnet.ini" --svc "$2" --dir="$testnet_dir" --clients "$3" || exit 1
supervisord -n -c "$testnet_dir/testnet.ini"
|
Loading…
Reference in New Issue