Update the structure and the paths

TheophileDiot 2022-11-11 14:55:04 +01:00
parent 04578aab3f
commit edce79936a
4917 changed files with 18397 additions and 18812 deletions


@@ -1 +0,0 @@
CVE-2022-30065


@@ -1,85 +0,0 @@
FROM nginx:1.20.2-alpine AS builder
# Copy dependencies sources folder
COPY bw/deps /tmp/bunkerweb/deps
# Compile and install dependencies
RUN apk add --no-cache --virtual build bash autoconf libtool automake geoip-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev && \
mkdir -p /opt/bunkerweb/deps && \
chmod +x /tmp/bunkerweb/deps/install.sh && \
bash /tmp/bunkerweb/deps/install.sh && \
apk del build
# Copy python requirements
COPY bw/deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
# Install python requirements
RUN apk add --no-cache --virtual build py3-pip g++ gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
pip install --no-cache-dir --upgrade pip && \
pip install wheel && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
apk del build
FROM nginx:1.20.2-alpine
# Copy dependencies
COPY --from=builder /opt/bunkerweb /opt/bunkerweb
# Copy files
# can't exclude deps from . so we are copying everything by hand
COPY bw/api /opt/bunkerweb/api
COPY bw/confs /opt/bunkerweb/confs
COPY bw/core /opt/bunkerweb/core
COPY bw/cli /opt/bunkerweb/cli
COPY bw/gen /opt/bunkerweb/gen
COPY bw/helpers /opt/bunkerweb/helpers
COPY bw/loading /opt/bunkerweb/loading
COPY bw/lua /opt/bunkerweb/lua
COPY bw/misc /opt/bunkerweb/misc
COPY bw/settings.json /opt/bunkerweb/settings.json
COPY db /opt/bunkerweb/db
COPY utils /opt/bunkerweb/utils
COPY VERSION /opt/bunkerweb/VERSION
# Install runtime dependencies, pypi packages, move bwcli, create data folders and set permissions
RUN apk add --no-cache bash python3 libgcc libstdc++ openssl git && \
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin && \
echo "Docker" > /opt/bunkerweb/INTEGRATION && \
for dir in cache configs plugins www ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
for dir in configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs cache/letsencrypt ; do mkdir -p "/data/${dir}" ; done && \
chown -R root:nginx /data && \
chmod -R 770 /data && \
mkdir /opt/bunkerweb/tmp && \
chown -R root:nginx /opt/bunkerweb && \
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
chmod 770 /opt/bunkerweb/cache /opt/bunkerweb/tmp && \
chmod 750 /opt/bunkerweb/cli/main.py /opt/bunkerweb/gen/main.py /opt/bunkerweb/helpers/*.sh /usr/local/bin/bwcli /opt/bunkerweb/deps/python/bin/* && \
chown root:nginx /usr/local/bin/bwcli && \
chown -R nginx:nginx /etc/nginx && \
mkdir /var/log/letsencrypt /var/lib/letsencrypt && \
chown root:nginx /var/log/letsencrypt /var/lib/letsencrypt && \
chmod 770 /var/log/letsencrypt /var/lib/letsencrypt && \
chown -R root:nginx /etc/nginx && \
chmod -R 770 /etc/nginx && \
rm -f /var/log/nginx/* && \
ln -s /proc/1/fd/2 /var/log/nginx/error.log && \
ln -s /proc/1/fd/2 /var/log/nginx/modsec_audit.log && \
ln -s /proc/1/fd/1 /var/log/nginx/access.log && \
ln -s /proc/1/fd/1 /var/log/nginx/jobs.log && \
ln -s /proc/1/fd/1 /var/log/letsencrypt/letsencrypt.log && \
chmod 660 /opt/bunkerweb/INTEGRATION
# Fix CVEs
RUN apk add "freetype>=2.10.4-r3" "curl>=7.79.1-r2" "libcurl>=7.79.1-r2" "openssl>=1.1.1q-r0" "libssl1.1>=1.1.1q-r0" "libcrypto1.1>=1.1.1q-r0" "git>=2.32.3-r0" "ncurses-libs>=6.2_p20210612-r1" "ncurses-terminfo-base>=6.2_p20210612-r1" "zlib>=1.2.12-r2" "libxml2>=2.9.14-r1"
VOLUME /data /etc/nginx
EXPOSE 8080/tcp 8443/tcp
USER nginx:nginx
HEALTHCHECK --interval=10s --timeout=10s --start-period=10s --retries=6 CMD /opt/bunkerweb/helpers/healthcheck.sh
ENTRYPOINT ["/opt/bunkerweb/helpers/entrypoint.sh"]


@@ -1,5 +1,5 @@
<p align="center">
<img alt="BunkerWeb logo" src="https://github.com/bunkerity/bunkerweb/raw/master/logo.png" />
<img alt="BunkerWeb logo" src="https://github.com/bunkerity/bunkerweb/raw/master/misc/logo.png" />
</p>
<p align="center">


@@ -1 +0,0 @@
1.4.3


@@ -1,45 +0,0 @@
FROM python:3.11-alpine
# Copy python requirements
COPY bw/deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
# Install dependencies
RUN apk add --no-cache --virtual build g++ gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
pip install --no-cache-dir --upgrade pip && \
pip install wheel && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
apk del build
# Copy files
# can't exclude specific files/dir from . so we are copying everything by hand
COPY autoconf /opt/bunkerweb/autoconf
COPY bw/api /opt/bunkerweb/api
COPY bw/cli /opt/bunkerweb/cli
COPY bw/core /opt/bunkerweb/core
COPY bw/helpers /opt/bunkerweb/helpers
COPY bw/settings.json /opt/bunkerweb/settings.json
COPY db /opt/bunkerweb/db
COPY utils /opt/bunkerweb/utils
# Add nginx user, drop bwcli, setup data folders, permissions and logging
RUN apk add --no-cache bash && \
addgroup -g 101 nginx && \
adduser -h /var/cache/nginx -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx && \
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin && \
chown -R nginx:nginx /opt/bunkerweb && \
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
chmod 750 /opt/bunkerweb/cli/main.py /opt/bunkerweb/helpers/*.sh /usr/local/bin/bwcli /opt/bunkerweb/autoconf/main.py /opt/bunkerweb/deps/python/bin/* && \
chown root:nginx /usr/local/bin/bwcli
# Fix CVEs
RUN apk add "libssl1.1>=1.1.1q-r0" "libcrypto1.1>=1.1.1q-r0" "git>=2.32.3-r0" "ncurses-libs>=6.2_p20210612-r1" "ncurses-terminfo-base>=6.2_p20210612-r1" "libtirpc>=1.3.2-r1" "libtirpc-conf>=1.3.2-r1" "zlib>=1.2.12-r2" "libxml2>=2.9.14-r1"
VOLUME /data /etc/nginx
WORKDIR /opt/bunkerweb/autoconf
USER root:nginx
CMD ["python3", "/opt/bunkerweb/autoconf/main.py"]


@@ -1,67 +0,0 @@
#!/usr/bin/python3
from os import _exit, getenv
from signal import SIGINT, SIGTERM, signal
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
sys_path.append("/opt/bunkerweb/db")
from logger import setup_logger
from SwarmController import SwarmController
from IngressController import IngressController
from DockerController import DockerController
# Get variables
logger = setup_logger("Autoconf", getenv("LOG_LEVEL", "INFO"))
swarm = getenv("SWARM_MODE", "no") == "yes"
kubernetes = getenv("KUBERNETES_MODE", "no") == "yes"
docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
def exit_handler(signum, frame):
logger.info("Stop signal received, exiting...")
_exit(0)
signal(SIGINT, exit_handler)
signal(SIGTERM, exit_handler)
try:
# Instantiate the controller
if swarm:
logger.info("Swarm mode detected")
controller = SwarmController(docker_host)
elif kubernetes:
logger.info("Kubernetes mode detected")
controller = IngressController()
else:
logger.info("Docker mode detected")
controller = DockerController(docker_host)
# Wait for instances
logger.info("Waiting for BunkerWeb instances ...")
instances = controller.wait(wait_retry_interval)
logger.info("BunkerWeb instances are ready 🚀")
i = 1
for instance in instances:
logger.info(f"Instance #{i} : {instance['name']}")
i += 1
# Run first configuration
ret = controller.apply_config()
if not ret:
logger.error("Error while applying initial configuration")
_exit(1)
# Process events
logger.info("Processing events ...")
controller.process_events()
except:
logger.error(f"Exception while running autoconf :\n{format_exc()}")
sys_exit(1)
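
Note: the three controllers are interchangeable here because this script only relies on three methods. A minimal sketch of the implicit interface (names taken from the calls above; the bodies are placeholders, not the actual classes):

from abc import ABC, abstractmethod

class Controller(ABC):
    # Interface implied by the autoconf main.py above.
    @abstractmethod
    def wait(self, retry_interval: int) -> list[dict]:
        """Block until BunkerWeb instances are up; return [{"name": ...}, ...]."""

    @abstractmethod
    def apply_config(self) -> bool:
        """Generate and push the configuration; True on success."""

    @abstractmethod
    def process_events(self) -> None:
        """Watch orchestrator events and reconfigure; normally never returns."""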


@@ -1,54 +0,0 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from os import _exit
from sys import exit as sys_exit, path
from traceback import format_exc
path.append("/opt/bunkerweb/deps/python")
path.append("/opt/bunkerweb/cli")
path.append("/opt/bunkerweb/utils")
path.append("/opt/bunkerweb/api")
from logger import setup_logger
from CLI import CLI
if __name__ == "__main__":
logger = setup_logger("CLI", "INFO")
try:
# Global parser
parser = ArgumentParser(description="BunkerWeb Command Line Interface")
subparsers = parser.add_subparsers(help="command", dest="command")
# Unban subparser
parser_unban = subparsers.add_parser(
"unban", help="remove a ban from the cache"
)
parser_unban.add_argument("ip", type=str, help="IP address to unban")
# Parse args
args = parser.parse_args()
# Instantiate CLI
cli = CLI()
# Execute command
ret, err = False, "unknown command"
if args.command == "unban":
ret, err = cli.unban(args.ip)
if not ret:
logger.error(f"CLI command status : ❌ (fail)\n{err}")
_exit(1)
else:
logger.info(f"CLI command status : ✔️ (success)\n{err}")
_exit(0)
except SystemExit as se:
sys_exit(se.code)
except:
logger.error(f"Error while executing bwcli :\n{format_exc()}")
sys_exit(1)
sys_exit(0)
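
For reference, the unban subcommand reduces to a single call on the CLI class; an equivalent direct invocation as a sketch (IP value illustrative, sys.path setup as in the script above):

from CLI import CLI

cli = CLI()
ret, err = cli.unban("192.0.2.1")  # same (status, message) pair the script logs
print(ret, err)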


@@ -1,70 +0,0 @@
server {
# reason variable
set $reason '';
server_name _;
# HTTP listen
{% if LISTEN_HTTP == "yes" +%}
listen 0.0.0.0:{{ HTTP_PORT }} default_server {% if USE_PROXY_PROTOCOL == "yes" %}proxy_protocol{% endif %};
{% endif %}
{% if IS_LOADING == "yes" +%}
root /opt/bunkerweb/loading;
index index.html;
{% endif %}
# include core and plugins default-server configurations
include /etc/nginx/default-server-http/*.conf;
# include custom default-server configurations
include /opt/bunkerweb/configs/default-server-http/*.conf;
log_by_lua_block {
local utils = require "utils"
local logger = require "logger"
local datastore = require "datastore"
local plugins = require "plugins"
logger.log(ngx.INFO, "LOG", "Log phase started")
-- List all plugins
local list, err = plugins:list()
if not list then
logger.log(ngx.ERR, "LOG", "Can't list loaded plugins : " .. err)
list = {}
end
-- Call log_default method of plugins
for i, plugin in ipairs(list) do
local ret, plugin_lua = pcall(require, plugin.id .. "/" .. plugin.id)
if ret then
local plugin_obj = plugin_lua.new()
if plugin_obj.log_default ~= nil then
logger.log(ngx.INFO, "LOG", "Executing log_default() of " .. plugin.id)
local ok, err = plugin_obj:log_default()
if not ok then
logger.log(ngx.ERR, "LOG", "Error while calling log_default() on plugin " .. plugin.id .. " : " .. err)
else
logger.log(ngx.INFO, "LOG", "Return value from " .. plugin.id .. ".log_default() is : " .. err)
end
else
logger.log(ngx.INFO, "LOG", "log_default() method not found in " .. plugin.id .. ", skipped execution")
end
end
end
-- Display reason at info level
local reason = utils.get_reason()
if reason then
logger.log(ngx.INFO, "LOG", "Client was denied with reason : " .. reason)
end
logger.log(ngx.INFO, "LOG", "Log phase ended")
}
}


@@ -1,92 +0,0 @@
# /etc/nginx/base_http.conf
# zero copy within the kernel
sendfile on;
# send packets only if filled
tcp_nopush on;
# remove 200ms delay
tcp_nodelay on;
# load mime types and set default one
include /etc/nginx/mime.types;
default_type application/octet-stream;
# access log format
log_format logf '{{ LOG_FORMAT }}';
access_log /var/log/nginx/access.log logf;
# temp paths
proxy_temp_path /opt/bunkerweb/tmp/proxy_temp;
client_body_temp_path /opt/bunkerweb/tmp/client_temp;
fastcgi_temp_path /opt/bunkerweb/tmp/fastcgi_temp;
uwsgi_temp_path /opt/bunkerweb/tmp/uwsgi_temp;
scgi_temp_path /opt/bunkerweb/tmp/scgi_temp;
# close connections in FIN_WAIT1 state
reset_timedout_connection on;
# timeouts
client_body_timeout 10;
client_header_timeout 10;
keepalive_timeout 15;
send_timeout 10;
# resolvers to use
resolver {{ DNS_RESOLVERS }} ipv6=off;
# remove ports when sending redirects
port_in_redirect off;
# lua path and dicts
lua_package_path "/opt/bunkerweb/lua/?.lua;/opt/bunkerweb/core/?.lua;/opt/bunkerweb/plugins/?.lua;/opt/bunkerweb/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerweb/deps/lib/?.so;/opt/bunkerweb/deps/lib/lua/?.so;;";
lua_ssl_trusted_certificate "/opt/bunkerweb/misc/root-ca.pem";
lua_ssl_verify_depth 2;
lua_shared_dict datastore {{ DATASTORE_MEMORY_SIZE }};
# LUA init block
include /etc/nginx/init-lua.conf;
# API server
{% if USE_API == "yes" %}include /etc/nginx/api.conf;{% endif +%}
# healthcheck server
include /etc/nginx/healthcheck.conf;
# default server
{% if MULTISITE == "yes" or DISABLE_DEFAULT_SERVER == "yes" or IS_LOADING == "yes" +%}
include /etc/nginx/default-server-http.conf;
{% endif +%}
# disable sending nginx version globally
server_tokens off;
# server config(s)
{% if MULTISITE == "yes" and SERVER_NAME != "" %}
{% set map_servers = {} %}
{% for server_name in SERVER_NAME.split(" ") %}
{% if server_name + "_SERVER_NAME" in all %}
{% set x = map_servers.update({server_name : all[server_name + "_SERVER_NAME"].split(" ")}) %}
{% endif %}
{% endfor %}
{% for server_name in SERVER_NAME.split(" ") %}
{% if not server_name in map_servers %}
{% set found = {"res": false} %}
{% for first_server, servers in map_servers.items() %}
{% if server_name in servers %}
{% set x = found.update({"res" : true}) %}
{% endif %}
{% endfor %}
{% if not found["res"] %}
{% set x = map_servers.update({server_name : [server_name]}) %}
{% endif %}
{% endif %}
{% endfor %}
{% for first_server in map_servers +%}
include /etc/nginx/{{ first_server }}/server.conf;
{% endfor %}
{% elif MULTISITE == "no" and SERVER_NAME != "" +%}
include /etc/nginx/server.conf;
{% endif %}
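
The map_servers block above groups SERVER_NAME entries so that exactly one server.conf is included per "first server": a name listed in another service's <name>_SERVER_NAME is treated as an alias and skipped. The same grouping sketched in Python for readability (variable names mirror the template; all_vars stands for the template's `all` dict):

def map_server_confs(all_vars: dict) -> dict:
    # Returns {first_server: [its server names]}, like map_servers above.
    names = all_vars.get("SERVER_NAME", "").split(" ")
    map_servers = {}
    for name in names:
        if name + "_SERVER_NAME" in all_vars:
            map_servers[name] = all_vars[name + "_SERVER_NAME"].split(" ")
    for name in names:
        if name not in map_servers and not any(
            name in servers for servers in map_servers.values()
        ):
            map_servers[name] = [name]
    return map_servers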


@@ -1,118 +0,0 @@
init_by_lua_block {
local logger = require "logger"
local datastore = require "datastore"
local plugins = require "plugins"
local utils = require "utils"
local cjson = require "cjson"
logger.log(ngx.NOTICE, "INIT", "Init phase started")
-- Remove previous data from the datastore
local data_keys = {"^plugin_", "^variable_", "^plugins$", "^api_", "^misc_"}
for i, key in pairs(data_keys) do
local ok, err = datastore:delete_all(key)
if not ok then
logger.log(ngx.ERR, "INIT", "Can't delete " .. key .. " from datastore : " .. err)
return false
end
logger.log(ngx.INFO, "INIT", "Deleted " .. key .. " from datastore")
end
-- Load variables into the datastore
local file = io.open("/etc/nginx/variables.env")
if not file then
logger.log(ngx.ERR, "INIT", "Can't open /etc/nginx/variables.env file")
return false
end
file:close()
for line in io.lines("/etc/nginx/variables.env") do
local variable, value = line:match("(.+)=(.*)")
ok, err = datastore:set("variable_" .. variable, value)
if not ok then
logger.log(ngx.ERR, "INIT", "Can't save variable " .. variable .. " into datastore")
return false
end
end
-- Set default values into the datastore
ok, err = datastore:set("plugins", cjson.encode({}))
if not ok then
logger.log(ngx.ERR, "INIT", "Can't set default value for plugins into the datastore : " .. err)
return false
end
ok, err = utils.set_values()
if not ok then
logger.log(ngx.ERR, "INIT", "Error while setting default values : " .. err)
return false
end
-- API setup
local value, err = datastore:get("variable_USE_API")
if not value then
logger.log(ngx.ERR, "INIT", "Can't get variable USE_API from the datastore")
return false
end
if value == "yes" then
value, err = datastore:get("variable_API_WHITELIST_IP")
if not value then
logger.log(ngx.ERR, "INIT", "Can't get variable API_WHITELIST_IP from the datastore")
return false
end
local whitelists = { data = {}}
for whitelist in value:gmatch("%S+") do
table.insert(whitelists.data, whitelist)
end
ok, err = datastore:set("api_whitelist_ip", cjson.encode(whitelists))
if not ok then
logger.log(ngx.ERR, "INIT", "Can't save api_whitelist_ip to datastore : " .. err)
return false
end
end
-- Load plugins into the datastore
local plugin_paths = {"/opt/bunkerweb/core", "/opt/bunkerweb/plugins"}
for i, plugin_path in ipairs(plugin_paths) do
local paths = io.popen("find -L " .. plugin_path .. " -maxdepth 1 -type d ! -path " .. plugin_path)
for path in paths:lines() do
plugin, err = plugins:load(path)
if not plugin then
logger.log(ngx.ERR, "INIT", "Error while loading plugin from " .. path .. " : " .. err)
return false
end
logger.log(ngx.NOTICE, "INIT", "Loaded plugin " .. plugin.id .. " v" .. plugin.version)
end
end
-- Call init method of plugins
local list, err = plugins:list()
if not list then
logger.log(ngx.ERR, "INIT", "Can't list loaded plugins : " .. err)
list = {}
end
for i, plugin in ipairs(list) do
local ret, plugin_lua = pcall(require, plugin.id .. "/" .. plugin.id)
if ret then
local plugin_obj = plugin_lua.new()
if plugin_obj.init ~= nil then
ok, err = plugin_obj:init()
if not ok then
logger.log(ngx.ERR, "INIT", "Plugin " .. plugin.id .. " failed on init() : " .. err)
else
logger.log(ngx.INFO, "INIT", "Successfull init() call for plugin " .. plugin.id .. " : " .. err)
end
else
logger.log(ngx.INFO, "INIT", "init() method not found in " .. plugin.id .. ", skipped execution")
end
else
if plugin_lua:match("not found") then
logger.log(ngx.INFO, "INIT", "can't require " .. plugin.id .. " : not found")
else
logger.log(ngx.ERR, "INIT", "can't require " .. plugin.id .. " : " .. plugin_lua)
end
end
end
logger.log(ngx.NOTICE, "INIT", "Init phase ended")
}
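
This init block, like the log block earlier, uses one dispatch pattern: require the plugin module by its id, instantiate it with new(), and call a hook only if the plugin defines it. A Python analogue of the pattern (illustrative only; the module layout and names are assumptions, the real plugins are Lua):

from importlib import import_module

def dispatch(plugin_ids, hook):
    for plugin_id in plugin_ids:
        try:
            module = import_module(plugin_id)  # Lua: pcall(require, id .. "/" .. id)
        except ImportError:
            continue  # the "not found" branch above
        plugin = module.new()
        method = getattr(plugin, hook, None)
        if method is None:
            continue  # hook not implemented: "skipped execution"
        ok, msg = method()  # plugins return an (ok, message) pair
        print("ok" if ok else "error", plugin_id, hook, msg)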


@@ -1,60 +0,0 @@
# /etc/nginx/nginx.conf
# load dynamic modules
load_module /opt/bunkerweb/modules/ngx_http_cookie_flag_filter_module.so;
#load_module /opt/bunkerweb/modules/ngx_http_geoip2_module.so;
load_module /opt/bunkerweb/modules/ngx_http_headers_more_filter_module.so;
load_module /opt/bunkerweb/modules/ngx_http_lua_module.so;
load_module /opt/bunkerweb/modules/ngx_http_modsecurity_module.so;
load_module /opt/bunkerweb/modules/ngx_http_brotli_filter_module.so;
load_module /opt/bunkerweb/modules/ngx_http_brotli_static_module.so;
#load_module /opt/bunkerweb/modules/ngx_stream_geoip2_module.so;
#load_module /opt/bunkerweb/modules/ngx_stream_lua_module.so;
# PID file
pid /opt/bunkerweb/tmp/nginx.pid;
# worker number (default = auto)
worker_processes {{ WORKER_PROCESSES }};
# faster regexp
pcre_jit on;
# max open files for each worker
worker_rlimit_nofile {{ WORKER_RLIMIT_NOFILE }};
# error log level
error_log /var/log/nginx/error.log {{ LOG_LEVEL }};
# reason env var
env REASON;
events {
# max connections per worker
worker_connections {{ WORKER_CONNECTIONS }};
# epoll seems to be the best on Linux
use epoll;
}
http {
# include base http configuration
include /etc/nginx/http.conf;
# include core and plugins http configurations
include /etc/nginx/http/*.conf;
# include custom http configurations
include /opt/bunkerweb/configs/http/*.conf;
}
#stream {
# include base stream configuration
# include /etc/nginx/stream.conf;
# include core and plugins stream configurations
# include /etc/nginx/stream/*.conf;
# include custom stream configurations
# include /opt/bunkerweb/configs/stream/*.conf;
#}


@@ -1,27 +0,0 @@
server {
# server name (vhost)
server_name {{ SERVER_NAME }};
# HTTP listen
{% if LISTEN_HTTP == "yes" +%}
listen 0.0.0.0:{{ HTTP_PORT }}{% if MULTISITE == "no" and DISABLE_DEFAULT_SERVER == "no" %} default_server{% endif %}{% if USE_PROXY_PROTOCOL == "yes" %} proxy_protocol{% endif %};
{% endif %}
index index.php index.html index.htm;
# custom config
include /opt/bunkerweb/configs/server-http/*.conf;
{% if MULTISITE == "yes" +%}
include /opt/bunkerweb/configs/server-http/{{ SERVER_NAME.split(" ")[0] }}/*.conf;
{% endif %}
# reason variable
set $reason '';
# include LUA files
include {{ NGINX_PREFIX }}access-lua.conf;
include {{ NGINX_PREFIX }}log-lua.conf;
# include config files
include {{ NGINX_PREFIX }}server-http/*.conf;
}


@@ -1,71 +0,0 @@
# /etc/nginx/stream.conf
# size of the preread buffer
preread_buffer_size 16k;
# timeout of the preread phase
preread_timeout 30s;
# proxy protocol timeout
proxy_protocol_timeout 30s;
# resolvers to use
resolver {{ DNS_RESOLVERS }} ipv6=off;
# resolver timeout
resolver_timeout 30s;
# remove 200ms delay
tcp_nodelay on;
# bucket hash size
variables_hash_bucket_size 64;
variables_hash_max_size 1024;
# log format and level
log_format proxy '$remote_addr [$time_local] '
'$protocol $status $bytes_sent $bytes_received '
'$session_time "$upstream_addr" '
'"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
access_log /var/log/nginx/access.log proxy;
# lua path and dicts
lua_package_path "/opt/bunkerweb/lua/?.lua;/opt/bunkerweb/core/?.lua;/opt/bunkerweb/plugins/?.lua;/opt/bunkerweb/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerweb/deps/lib/?.so;/opt/bunkerweb/deps/lib/lua/?.so;;";
lua_ssl_trusted_certificate "/opt/bunkerweb/misc/root-ca.pem";
lua_ssl_verify_depth 2;
lua_shared_dict datastore 256m;
# LUA init block
include /etc/nginx/init-lua.conf;
# default server when MULTISITE=yes
{% if MULTISITE == "yes" %}include /etc/nginx/multisite-default-server.conf;{% endif +%}
# server config(s)
{% if MULTISITE == "yes" and SERVER_NAME != "" %}
{% set map_servers = {} %}
{% for server_name in SERVER_NAME.split(" ") %}
{% if server_name + "_SERVER_NAME" in all %}
{% set x = map_servers.update({server_name : all[server_name + "_SERVER_NAME"].split(" ")}) %}
{% endif %}
{% endfor %}
{% for server_name in SERVER_NAME.split(" ") %}
{% if not server_name in map_servers %}
{% set found = {"res": false} %}
{% for first_server, servers in map_servers.items() %}
{% if server_name in servers %}
{% set x = found.update({"res" : true}) %}
{% endif %}
{% endfor %}
{% if not found["res"] %}
{% set x = map_servers.update({server_name : [server_name]}) %}
{% endif %}
{% endif %}
{% endfor %}
{% for first_server in map_servers +%}
include /etc/nginx/{{ first_server }}/server.conf;
{% endfor %}
{% elif MULTISITE == "no" +%}
include /etc/nginx/server.conf;
{% endif %}


@@ -1,375 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
local session = require "resty.session"
local captcha = require "antibot.captcha"
local base64 = require "base64"
local sha256 = require "resty.sha256"
local str = require "resty.string"
local http = require "resty.http"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:init()
-- Check if init is needed
local init_needed, err = utils.has_not_variable("USE_ANTIBOT", "no")
if init_needed == nil then
return false, err
end
if not init_needed then
return true, "no service uses Antibot, skipping init"
end
-- Load templates
local templates = {}
for i, template in ipairs({"javascript", "captcha", "recaptcha", "hcaptcha"}) do
local f, err = io.open("/opt/bunkerweb/core/antibot/files/" .. template .. ".html")
if not f then
return false, "error while loading " .. template .. ".html : " .. err
end
templates[template] = f:read("*all")
f:close()
end
local ok, err = datastore:set("plugin_antibot_templates", cjson.encode(templates))
if not ok then
return false, "can't save templates to datastore : " .. err
end
return true, "success"
end
function _M:access()
-- Check if access is needed
local antibot, err = utils.get_variable("USE_ANTIBOT")
if antibot == nil then
return false, err, nil, nil
end
if antibot == "no" then
return true, "Antibot not activated", nil, nil
end
-- Get challenge URI
local challenge_uri, err = utils.get_variable("ANTIBOT_URI")
if not challenge_uri then
return false, "can't get Antibot URI from datastore : " .. err, nil, nil
end
-- Don't go further if client resolved the challenge
local resolved, err, original_uri = self:challenge_resolved(antibot)
if resolved == nil then
return false, "can't check if challenge is resolved : " .. err, nil, nil
end
if resolved then
if ngx.var.uri == challenge_uri then
return true, "client already resolved the challenge", true, ngx.redirect(original_uri)
end
return true, "client already resolved the challenge", nil, nil
end
-- Redirect to challenge page
if ngx.var.uri ~= challenge_uri then
local ok, err = self:prepare_challenge(antibot, challenge_uri)
if not ok then
return false, "can't prepare challenge : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "redirecting client to the challenge uri", true, ngx.redirect(challenge_uri)
end
-- Display challenge
if ngx.var.request_method == "GET" then
local ok, err = self:display_challenge(antibot, challenge_uri)
if not ok then
if err == "can't open session" then
local ok, err = self:prepare_challenge(antibot, challenge_uri)
if not ok then
return false, "can't prepare challenge : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "redirecting client to the challenge uri", true, ngx.redirect(challenge_uri)
end
return false, "display challenge error : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "displaying challenge to client", true, ngx.HTTP_OK
end
-- Check challenge
if ngx.var.request_method == "POST" then
local ok, err, redirect = self:check_challenge(antibot)
if ok == nil then
if err == "can't open session" then
local ok, err = self:prepare_challenge(antibot, challenge_uri)
if not ok then
return false, "can't prepare challenge : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "redirecting client to the challenge uri", true, ngx.redirect(challenge_uri)
end
return false, "check challenge error : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
if redirect then
return true, "check challenge redirect : " .. redirect, true, ngx.redirect(redirect)
end
local ok, err = self:display_challenge(antibot)
if not ok then
if err == "can't open session" then
local ok, err = self:prepare_challenge(antibot, challenge_uri)
if not ok then
return false, "can't prepare challenge : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "redirecting client to the challenge uri", true, ngx.redirect(challenge_uri)
end
return false, "display challenge error : " .. err, true, ngx.HTTP_INTERNAL_SERVER_ERROR
end
return true, "displaying challenge to client", true, ngx.HTTP_OK
end
-- Method is suspicious, let's deny the request
return true, "unsupported HTTP method for Antibot", true, utils.get_deny_status()
end
function _M:challenge_resolved(antibot)
local chall_session, present, reason = session.open()
if present and chall_session.data.resolved and chall_session.data.type == antibot then
return true, "challenge " .. antibot .. " resolved", chall_session.data.original_uri
end
return false, "challenge " .. antibot .. " not resolved", nil
end
function _M:prepare_challenge(antibot, challenge_uri)
local chall_session, present, reason = session.open()
if not present then
local chall_session, present, reason = chall_session:start()
if not chall_session then
return false, "can't start session", nil
end
chall_session.data.type = antibot
chall_session.data.resolved = false
if ngx.var.request_uri == challenge_uri then
chall_session.data.original_uri = "/"
else
chall_session.data.original_uri = ngx.var.request_uri
end
if antibot == "cookie" then
chall_session.data.resolved = true
end
local saved, err = chall_session:save()
if not saved then
return false, "error while saving session : " .. err
end
end
return true, antibot .. " challenge prepared"
end
function _M:display_challenge(antibot, challenge_uri)
-- Open session
local chall_session, present, reason = session.open()
if not present then
return false, "can't open session"
end
-- Check if session type is equal to antibot type
if antibot ~= chall_session.data.type then
return false, "session type is different from antibot type"
end
-- Compute challenges
if antibot == "javascript" then
chall_session:start()
chall_session.data.random = utils.rand(20)
chall_session:save()
elseif antibot == "captcha" then
chall_session:start()
local chall_captcha = captcha.new()
chall_captcha:font("/opt/bunkerweb/core/antibot/files/font.ttf")
chall_captcha:generate()
chall_session.data.image = base64.encode(chall_captcha:jpegStr(70))
chall_session.data.text = chall_captcha:getStr()
chall_session:save()
end
-- Load HTML templates
local str_templates, err = datastore:get("plugin_antibot_templates")
if not str_templates then
return false, "can't get templates from datastore : " .. err
end
local templates = cjson.decode(str_templates)
local html = ""
-- Javascript case
if antibot == "javascript" then
html = templates.javascript:format(challenge_uri, chall_session.data.random)
end
-- Captcha case
if antibot == "captcha" then
html = templates.captcha:format(challenge_uri, chall_session.data.image)
end
-- reCAPTCHA case
if antibot == "recaptcha" then
local recaptcha_sitekey, err = utils.get_variable("ANTIBOT_RECAPTCHA_SITEKEY")
if not recaptcha_sitekey then
return false, "can't get reCAPTCHA sitekey variable : " .. err
end
html = templates.recaptcha:format(recaptcha_sitekey, challenge_uri, recaptcha_sitekey)
end
-- hCaptcha case
if antibot == "hcaptcha" then
local hcaptcha_sitekey, err = utils.get_variable("ANTIBOT_HCAPTCHA_SITEKEY")
if not hcaptcha_sitekey then
return false, "can't get hCaptcha sitekey variable : " .. err
end
html = templates.hcaptcha:format(challenge_uri, hcaptcha_sitekey)
end
ngx.header["Content-Type"] = "text/html"
ngx.say(html)
return true, "displayed challenge"
end
function _M:check_challenge(antibot)
-- Open session
local chall_session, present, reason = session.open()
if not present then
return nil, "can't open session", nil
end
-- Check if session type is equal to antibot type
if antibot ~= chall_session.data.type then
return nil, "session type is different from antibot type", nil
end
local resolved = false
local err = ""
local redirect = nil
-- Javascript case
if antibot == "javascript" then
ngx.req.read_body()
local args, err = ngx.req.get_post_args(1)
if err == "truncated" or not args or not args["challenge"] then
return false, "missing challenge arg", nil
end
local hash = sha256:new()
hash:update(chall_session.data.random .. args["challenge"])
local digest = hash:final()
resolved = str.to_hex(digest):find("^0000") ~= nil
if not resolved then
return false, "wrong value", nil
end
chall_session:start()
chall_session.data.resolved = true
chall_session:save()
return true, "resolved", chall_session.data.original_uri
end
-- Captcha case
if antibot == "captcha" then
ngx.req.read_body()
local args, err = ngx.req.get_post_args(1)
if err == "truncated" or not args or not args["captcha"] then
return false, "missing challenge arg", nil
end
if chall_session.data.text ~= args["captcha"] then
return false, "wrong value", nil
end
chall_session:start()
chall_session.data.resolved = true
chall_session:save()
return true, "resolved", chall_session.data.original_uri
end
-- reCAPTCHA case
if antibot == "recaptcha" then
ngx.req.read_body()
local args, err = ngx.req.get_post_args(1)
if err == "truncated" or not args or not args["token"] then
return false, "missing challenge arg", nil
end
local recaptcha_secret, err = utils.get_variable("ANTIBOT_RECAPTCHA_SECRET")
if not recaptcha_secret then
return nil, "can't get reCAPTCHA secret variable : " .. err, nil
end
local httpc, err = http.new()
if not httpc then
return false, "can't instantiate http object : " .. err, nil, nil
end
local res, err = httpc:request_uri("https://www.google.com/recaptcha/api/siteverify", {
method = "POST",
body = "secret=" .. recaptcha_secret .. "&response=" .. args["token"] .. "&remoteip=" .. ngx.var.remote_addr,
headers = {
["Content-Type"] = "application/x-www-form-urlencoded"
}
})
httpc:close()
if not res then
return nil, "can't send request to reCAPTCHA API : " .. err, nil
end
local ok, data = pcall(cjson.decode, res.body)
if not ok then
return nil, "error while decoding JSON from reCAPTCHA API : " .. data, nil
end
local recaptcha_score, err = utils.get_variable("ANTIBOT_RECAPTCHA_SCORE")
if not recaptcha_score then
return nil, "can't get reCAPTCHA score variable : " .. err, nil
end
if not data.success or data.score < tonumber(recaptcha_score) then
return false, "client failed challenge with score " .. tostring(data.score), nil
end
chall_session:start()
chall_session.data.resolved = true
chall_session:save()
return true, "resolved", chall_session.data.original_uri
end
-- hCaptcha case
if antibot == "hcaptcha" then
ngx.req.read_body()
local args, err = ngx.req.get_post_args(1)
if err == "truncated" or not args or not args["token"] then
return false, "missing challenge arg", nil
end
local hcaptcha_secret, err = utils.get_variable("ANTIBOT_HCAPTCHA_SECRET")
if not hcaptcha_secret then
return nil, "can't get hCaptcha secret variable : " .. err, nil
end
local httpc, err = http.new()
if not httpc then
return false, "can't instantiate http object : " .. err, nil, nil
end
local res, err = httpc:request_uri("https://hcaptcha.com/siteverify", {
method = "POST",
body = "secret=" .. hcaptcha_secret .. "&response=" .. args["token"] .. "&remoteip=" .. ngx.var.remote_addr,
headers = {
["Content-Type"] = "application/x-www-form-urlencoded"
}
})
httpc:close()
if not res then
return nil, "can't send request to hCaptcha API : " .. err, nil
end
local ok, data = pcall(cjson.decode, res.body)
if not ok then
return nil, "error while decoding JSON from hCaptcha API : " .. data, nil
end
if not data.success then
return false, "client failed challenge", nil
end
chall_session:start()
chall_session.data.resolved = true
chall_session:save()
return true, "resolved", chall_session.data.original_uri
end
return nil, "unknown", nil
end
return _M
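
The "javascript" challenge above is a small proof of work: check_challenge() accepts the POSTed value only when SHA-256 of the per-session random string concatenated with it yields a hex digest starting with "0000" (about 16 bits of work). Both sides of that check sketched in Python (the solving side is normally done by the served JavaScript template):

from hashlib import sha256
from itertools import count

def solve(random_str: str) -> str:
    # Brute-force a suffix whose digest starts with "0000".
    for n in count():
        if sha256((random_str + str(n)).encode()).hexdigest().startswith("0000"):
            return str(n)

def check(random_str: str, challenge: str) -> bool:
    # Mirrors check_challenge(): str.to_hex(digest):find("^0000") ~= nil
    return sha256((random_str + challenge).encode()).hexdigest().startswith("0000")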


@@ -1,344 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
local ipmatcher = require "resty.ipmatcher"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:init()
-- Check if init is needed
local init_needed, err = utils.has_variable("USE_BLACKLIST", "yes")
if init_needed == nil then
return false, err
end
if not init_needed then
return true, "no service uses Blacklist, skipping init"
end
-- Read blacklists
local blacklists = {
["IP"] = {},
["RDNS"] = {},
["ASN"] = {},
["USER_AGENT"] = {},
["URI"] = {},
["IGNORE_IP"] = {},
["IGNORE_RDNS"] = {},
["IGNORE_ASN"] = {},
["IGNORE_USER_AGENT"] = {},
["IGNORE_URI"] = {},
}
local i = 0
for kind, _ in pairs(blacklists) do
local f, err = io.open("/opt/bunkerweb/cache/blacklist/" .. kind .. ".list", "r")
if f then
for line in f:lines() do
table.insert(blacklists[kind], line)
i = i + 1
end
f:close()
end
end
-- Load them into datastore
local ok, err = datastore:set("plugin_blacklist_list", cjson.encode(blacklists))
if not ok then
return false, "can't store Blacklist list into datastore : " .. err
end
return true, "successfully loaded " .. tostring(i) .. " bad IP/network/rDNS/ASN/User-Agent/URI"
end
function _M:access()
-- Check if access is needed
local access_needed, err = utils.get_variable("USE_BLACKLIST")
if access_needed == nil then
return false, err
end
if access_needed ~= "yes" then
return true, "Blacklist not activated"
end
-- Check the cache
local cached_ip, err = self:is_in_cache("ip" .. ngx.var.remote_addr)
local cached_ignored_ip, err = self:is_in_cache("ignore_ip" .. ngx.var.remote_addr)
if cached_ignored_ip then
logger.log(ngx.NOTICE, "BLACKLIST", "IP is in cached ignore blacklist (info: " .. cached_ignored_ip .. ")")
elseif cached_ip and cached_ip ~= "ok" then
return true, "IP is in blacklist cache (info = " .. cached_ip .. ")", true, utils.get_deny_status()
end
local cached_uri, err = self:is_in_cache("uri" .. ngx.var.uri)
local cached_ignored_uri, err = self:is_in_cache("ignore_uri" .. ngx.var.uri)
if cached_ignored_uri then
logger.log(ngx.NOTICE, "BLACKLIST", "URI is in cached ignore blacklist (info: " .. cached_ignored_uri .. ")")
elseif cached_uri and cached_uri ~= "ok" then
return true, "URI is in blacklist cache (info = " .. cached_uri .. ")", true, utils.get_deny_status()
end
local cached_ua = true
local cached_ignored_ua = false
if ngx.var.http_user_agent then
cached_ua, err = self:is_in_cache("ua" .. ngx.var.http_user_agent)
cached_ignored_ua, err = self:is_in_cache("ignore_ua" .. ngx.var.http_user_agent)
if cached_ignored_ua then
logger.log(ngx.NOTICE, "BLACKLIST", "User-Agent is in cached ignore blacklist (info: " .. cached_ignored_ua .. ")")
elseif cached_ua and cached_ua ~= "ok" then
return true, "User-Agent is in blacklist cache (info = " .. cached_ua .. ")", true, utils.get_deny_status()
end
end
if cached_ignored_ip and cached_ignored_uri and cached_ignored_ua then
logger.log(ngx.NOTICE, "BLACKLIST", "full request is in cached ignore blacklist")
elseif cached_ip and cached_uri and cached_ua then
return true, "full request is in blacklist cache (not blacklisted)", false, nil
end
-- Get list
local data, err = datastore:get("plugin_blacklist_list")
if not data then
return false, "can't get Blacklist list : " .. err, false, nil
end
local ok, blacklists = pcall(cjson.decode, data)
if not ok then
return false, "error while decoding blacklists : " .. blacklists, false, nil
end
-- Return value
local ret, ret_err = true, "success"
-- Check if IP is in IP/net blacklist
local ip_net, err = utils.get_variable("BLACKLIST_IP")
local ignored_ip_net, err = utils.get_variable("BLACKLIST_IGNORE_IP")
if ip_net and ip_net ~= "" then
for element in ip_net:gmatch("%S+") do
table.insert(blacklists["IP"], element)
end
end
if ignored_ip_net and ignored_ip_net ~= "" then
for element in ignored_ip_net:gmatch("%S+") do
table.insert(blacklists["IGNORE_IP"], element)
end
end
if not cached_ip then
local ipm, err = ipmatcher.new(blacklists["IP"])
local ipm_ignore, err_ignore = ipmatcher.new(blacklists["IGNORE_IP"])
if not ipm then
ret = false
ret_err = "can't instantiate ipmatcher " .. err
elseif not ipm_ignore then
ret = false
ret_err = "can't instantiate ipmatcher " .. err_ignore
else
if ipm:match(ngx.var.remote_addr) then
if ipm_ignore:match(ngx.var.remote_addr) then
self:add_to_cache("ignore_ip" .. ngx.var.remote_addr, "ip/net")
logger.log(ngx.NOTICE, "BLACKLIST", "client IP " .. ngx.var.remote_addr .. " is in blacklist but is ignored")
else
self:add_to_cache("ip" .. ngx.var.remote_addr, "ip/net")
return ret, "client IP " .. ngx.var.remote_addr .. " is in blacklist", true, utils.get_deny_status()
end
end
end
end
-- Instantiate ignore variable
local ignore = false
-- Check if rDNS is in blacklist
local rdns_global, err = utils.get_variable("BLACKLIST_RDNS_GLOBAL")
local check = true
if not rdns_global then
logger.log(ngx.ERR, "BLACKLIST", "Error while getting BLACKLIST_RDNS_GLOBAL variable : " .. err)
elseif rdns_global == "yes" then
check, err = utils.ip_is_global(ngx.var.remote_addr)
if check == nil then
logger.log(ngx.ERR, "BLACKLIST", "Error while getting checking if IP is global : " .. err)
end
end
if not cached_ip and check then
local rdns, err = utils.get_rdns(ngx.var.remote_addr)
if not rdns then
ret = false
ret_err = "error while trying to get reverse dns : " .. err
else
local rdns_list, err = utils.get_variable("BLACKLIST_RDNS")
local ignored_rdns_list, err = utils.get_variable("BLACKLIST_IGNORE_RDNS")
if rdns_list and rdns_list ~= "" then
for element in rdns_list:gmatch("%S+") do
table.insert(blacklists["RDNS"], element)
end
end
if ignored_rdns_list and ignored_rdns_list ~= "" then
for element in ignored_rdns_list:gmatch("%S+") do
table.insert(blacklists["IGNORE_RDNS"], element)
end
end
for i, suffix in ipairs(blacklists["RDNS"]) do
if rdns:sub(-#suffix) == suffix then
for j, ignore_suffix in ipairs(blacklists["IGNORE_RDNS"]) do
if rdns:sub(-#ignore_suffix) == ignore_suffix then
ignore = true
self:add_to_cache("ignore_rdns" .. ngx.var.remote_addr, "rDNS" .. suffix)
logger.log(ngx.NOTICE, "BLACKLIST", "client IP " .. ngx.var.remote_addr .. " is in blacklist (info = rDNS " .. suffix .. ") but is ignored")
break
end
end
if not ignore then
self:add_to_cache("ip" .. ngx.var.remote_addr, "rDNS" .. suffix)
return ret, "client IP " .. ngx.var.remote_addr .. " is in blacklist (info = rDNS " .. suffix .. ")", true, utils.get_deny_status()
end
end
end
end
end
-- Check if ASN is in blacklist
if not cached_ip then
if utils.ip_is_global(ngx.var.remote_addr) then
local asn, err = utils.get_asn(ngx.var.remote_addr)
if not asn then
ret = false
ret_err = "error while trying to get asn number : " .. err
else
local asn_list, err = utils.get_variable("BLACKLIST_ASN")
local ignored_asn_list, err = utils.get_variable("BLACKLIST_IGNORE_ASN")
if asn_list and asn_list ~= "" then
for element in asn_list:gmatch("%S+") do
table.insert(blacklists["ASN"], element)
end
end
if ignored_asn_list and ignored_asn_list ~= "" then
for element in ignored_asn_list:gmatch("%S+") do
table.insert(blacklists["IGNORE_ASN"], element)
end
end
for i, asn_bl in ipairs(blacklists["ASN"]) do
if tostring(asn) == asn_bl then
for j, ignore_asn_bl in ipairs(blacklists["IGNORE_ASN"]) do
if tostring(asn) == ignore_asn_bl then
ignore = true
self:add_to_cache("ignore_asn" .. ngx.var.remote_addr, "ASN" .. tostring(asn))
logger.log(ngx.NOTICE, "BLACKLIST", "client IP " .. ngx.var.remote_addr .. " is in blacklist (info = ASN " .. tostring(asn) .. ") but is ignored")
break
end
end
if not ignore then
self:add_to_cache("ip" .. ngx.var.remote_addr, "ASN " .. tostring(asn))
return ret, "client IP " .. ngx.var.remote_addr .. " is in blacklist (kind = ASN " .. tostring(asn) .. ")", true, utils.get_deny_status()
end
end
end
end
end
end
-- IP is not blacklisted
local ok, err = self:add_to_cache("ip" .. ngx.var.remote_addr, "ok")
if not ok then
ret = false
ret_err = err
end
-- Check if User-Agent is in blacklist
if not cached_ua and ngx.var.http_user_agent then
local ua_list, err = utils.get_variable("BLACKLIST_USER_AGENT")
local ignored_ua_list, err = utils.get_variable("BLACKLIST_IGNORE_USER_AGENT")
if ua_list and ua_list ~= "" then
for element in ua_list:gmatch("%S+") do
table.insert(blacklists["USER_AGENT"], element)
end
end
if ignored_ua_list and ignored_ua_list ~= "" then
for element in ignored_ua_list:gmatch("%S+") do
table.insert(blacklists["IGNORE_USER_AGENT"], element)
end
end
for i, ua_bl in ipairs(blacklists["USER_AGENT"]) do
if ngx.var.http_user_agent:match(ua_bl) then
for j, ignore_ua_bl in ipairs(blacklists["IGNORE_USER_AGENT"]) do
if ngx.var.http_user_agent:match(ignore_ua_bl) then
ignore = true
self:add_to_cache("ignore_ua" .. ngx.var.remote_addr, "UA" .. ua_bl)
logger.log(ngx.NOTICE, "BLACKLIST", "client User-Agent " .. ngx.var.http_user_agent .. " is in blacklist (matched " .. ua_bl .. ") but is ignored")
break
end
end
if not ignore then
self:add_to_cache("ua" .. ngx.var.http_user_agent, "UA " .. ua_bl)
return ret, "client User-Agent " .. ngx.var.http_user_agent .. " is in blacklist (matched " .. ua_bl .. ")", true, utils.get_deny_status()
end
end
end
-- UA is not blacklisted
local ok, err = self:add_to_cache("ua" .. ngx.var.http_user_agent, "ok")
if not ok then
ret = false
ret_err = err
end
end
-- Check if URI is in blacklist
if not cached_uri then
local uri_list, err = utils.get_variable("BLACKLIST_URI")
local ignored_uri_list, err = utils.get_variable("BLACKLIST_IGNORE_URI")
if uri_list and uri_list ~= "" then
for element in uri_list:gmatch("%S+") do
table.insert(blacklists["URI"], element)
end
end
if ignored_uri_list and ignored_uri_list ~= "" then
for element in ignored_uri_list:gmatch("%S+") do
table.insert(blacklists["IGNORE_URI"], element)
end
end
for i, uri_bl in ipairs(blacklists["URI"]) do
if ngx.var.uri:match(uri_bl) then
for j, ignore_uri_bl in ipairs(blacklists["IGNORE_URI"]) do
if ngx.var.uri:match(ignore_uri_bl) then
ignore = true
self:add_to_cache("ignore_uri" .. ngx.var.remote_addr, "URI" .. uri_bl)
logger.log(ngx.NOTICE, "BLACKLIST", "client URI " .. ngx.var.uri .. " is in blacklist (matched " .. uri_bl .. ") but is ignored")
break
end
end
if not ignore then
self:add_to_cache("uri" .. ngx.var.uri, "URI " .. uri_bl)
return ret, "client URI " .. ngx.var.uri .. " is in blacklist (matched " .. uri_bl .. ")", true, utils.get_deny_status()
end
end
end
end
-- URI is not blacklisted
local ok, err = self:add_to_cache("uri" .. ngx.var.uri, "ok")
if not ok then
ret = false
ret_err = err
end
return ret, "IP is not in list (error = " .. ret_err .. ")", false, nil
end
function _M:is_in_cache(ele)
local kind, err = datastore:get("plugin_blacklist_cache_" .. ngx.var.server_name .. ele)
if not kind then
if err ~= "not found" then
logger.log(ngx.ERR, "BLACKLIST", "Error while accessing cache : " .. err)
end
return false, err
end
return kind, "success"
end
function _M:add_to_cache(ele, kind)
local ok, err = datastore:set("plugin_blacklist_cache_" .. ngx.var.server_name .. ele, kind, 3600)
if not ok then
logger.log(ngx.ERR, "BLACKLIST", "Error while adding element to cache : " .. err)
return false, err
end
return true, "success"
end
return _M


@@ -1,199 +0,0 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from re import match
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
from requests import get
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, is_cached_file, file_hash
def check_line(kind, line):
if kind == "IP":
if "/" in line:
try:
ip_network(line)
return True, line
except ValueError:
pass
else:
try:
ip_address(line)
return True, line
except ValueError:
pass
return False, ""
elif kind == "RDNS":
if match(r"^(\.?[A-Za-z0-9\-]+)*\.[A-Za-z]{2,}$", line):
return True, line.lower()
return False, ""
elif kind == "ASN":
real_line = line.replace("AS", "")
if match(r"^\d+$", real_line):
return True, real_line
elif kind == "USER_AGENT":
return True, line.replace("\\ ", " ").replace("\\.", "%.").replace(
"\\\\", "\\"
).replace("-", "%-")
elif kind == "URI":
if match(r"^/", line):
return True, line
return False, ""
logger = setup_logger("BLACKLIST", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has Blacklist activated
blacklist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(first_server + "_USE_BLACKLIST", getenv("USE_BLACKLIST"))
== "yes"
):
blacklist_activated = True
break
# Singlesite case
elif getenv("USE_BLACKLIST") == "yes":
blacklist_activated = True
if not blacklist_activated:
logger.info("Blacklist is not activated, skipping downloads...")
_exit(0)
# Create directories if they don't exist
makedirs("/opt/bunkerweb/cache/blacklist", exist_ok=True)
makedirs("/opt/bunkerweb/tmp/blacklist", exist_ok=True)
# Our urls data
urls = {"IP": [], "RDNS": [], "ASN": [], "USER_AGENT": [], "URI": []}
# Don't go further if the cache is fresh
kinds_fresh = {
"IP": True,
"RDNS": True,
"ASN": True,
"USER_AGENT": True,
"URI": True,
"IGNORE_IP": True,
"IGNORE_RDNS": True,
"IGNORE_ASN": True,
"IGNORE_USER_AGENT": True,
"IGNORE_URI": True,
}
all_fresh = True
for kind in kinds_fresh:
if not is_cached_file(f"/opt/bunkerweb/cache/blacklist/{kind}.list", "hour"):
kinds_fresh[kind] = False
all_fresh = False
logger.info(
f"Blacklist for {kind} is not cached, processing downloads..",
)
else:
logger.info(
f"Blacklist for {kind} is already in cache, skipping downloads...",
)
if all_fresh:
_exit(0)
# Get URLs
urls = {
"IP": [],
"RDNS": [],
"ASN": [],
"USER_AGENT": [],
"URI": [],
"IGNORE_IP": [],
"IGNORE_RDNS": [],
"IGNORE_ASN": [],
"IGNORE_USER_AGENT": [],
"IGNORE_URI": [],
}
for kind in urls:
for url in getenv(f"BLACKLIST_{kind}_URLS", "").split(" "):
if url != "" and url not in urls[kind]:
urls[kind].append(url)
# Loop on kinds
for kind, urls_list in urls.items():
if kinds_fresh[kind]:
continue
        # Write combined data of the kind to a single temp file
        # (truncate once, then append per URL so downloads don't overwrite each other)
        open(f"/opt/bunkerweb/tmp/blacklist/{kind}.list", "w").close()
        i = 0
        for url in urls_list:
            try:
                logger.info(f"Downloading blacklist data from {url} ...")
                resp = get(url)
                if resp.status_code != 200:
                    continue
                with open(f"/opt/bunkerweb/tmp/blacklist/{kind}.list", "a") as f:
for line in resp.content.decode("utf-8").splitlines():
line = line.strip()
if kind != "USER_AGENT":
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
                        # IGNORE_* lists share the syntax of their base kind
                        ok, data = check_line(kind.replace("IGNORE_", ""), line)
if ok:
f.write(data + "\n")
i += 1
logger.info(f"Downloaded {i} bad {kind}")
# Check if file has changed
new_hash = file_hash(f"/opt/bunkerweb/tmp/blacklist/{kind}.list")
old_hash = cache_hash(f"/opt/bunkerweb/cache/blacklist/{kind}.list")
if new_hash == old_hash:
logger.info(
f"New file {kind}.list is identical to cache file, reload is not needed",
)
else:
logger.info(
f"New file {kind}.list is different than cache file, reload is needed",
)
# Put file in cache
cached, err = cache_file(
f"/opt/bunkerweb/tmp/blacklist/{kind}.list",
f"/opt/bunkerweb/cache/blacklist/{kind}.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching blacklist : {err}")
status = 2
if status != 2:
                    # Update db with the combined cleaned file (not just the last response)
                    with open(f"/opt/bunkerweb/tmp/blacklist/{kind}.list", "rb") as cached_f:
                        err = db.update_job_cache(
                            "blacklist-download",
                            None,
                            f"{kind}.list",
                            cached_f.read(),
                            checksum=new_hash,
                        )
if err:
logger.warning(f"Couldn't update db cache: {err}")
status = 1
except:
status = 2
logger.error(
f"Exception while getting blacklist from {url} :\n{format_exc()}"
)
except:
status = 2
logger.error(f"Exception while running blacklist-download.py :\n{format_exc()}")
sys_exit(status)
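
A few illustrative calls showing what check_line() above accepts and how each kind is normalized (example values only):

print(check_line("IP", "192.0.2.0/24"))       # (True, "192.0.2.0/24")
print(check_line("RDNS", ".Example.COM"))     # (True, ".example.com")
print(check_line("ASN", "AS64496"))           # (True, "64496")
print(check_line("USER_AGENT", "Bad\\ Bot"))  # (True, "Bad Bot"), escaped for Lua patterns
print(check_line("URI", "/admin"))            # (True, "/admin")
print(check_line("IP", "not-an-ip"))          # (False, "")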


@@ -1,245 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
local http = require "resty.http"
function _M.new()
local self = setmetatable({}, _M)
local server, err = datastore:get("variable_BUNKERNET_SERVER")
if not server then
return nil, "can't get BUNKERNET_SERVER from datastore : " .. err
end
self.server = server
local id, err = datastore:get("plugin_bunkernet_id")
if not id then
self.id = nil
else
self.id = id
end
return self, nil
end
function _M:init()
local init_needed, err = utils.has_variable("USE_BUNKERNET", "yes")
if init_needed == nil then
return false, err
end
if not init_needed then
return true, "no service uses BunkerNet, skipping init"
end
-- Check if instance ID is present
local f, err = io.open("/opt/bunkerweb/cache/bunkernet/instance.id", "r")
if not f then
return false, "can't read instance id : " .. err
end
-- Retrieve instance ID
id = f:read("*all"):gsub("[\r\n]", "")
f:close()
self.id = id
-- TODO : regex check just in case
-- Send a ping with the ID
--local ok, err, status, response = self:ping()
-- BunkerNet server is down or instance can't access it
--if not ok then
--return false, "can't send request to BunkerNet service : " .. err
-- Local instance ID is unknown to the server, let's delete it
--elseif status == 401 then
--local ok, message = os.remove("/opt/bunkerweb/cache/bunkernet/instance.id")
--if not ok then
--return false, "can't remove instance ID " .. message
--end
--return false, "instance ID is not valid"
--elseif status == 429 then
--return false, "sent too many requests to the BunkerNet service"
--elseif status ~= 200 then
--return false, "unknown error from BunkerNet service (HTTP status = " .. tostring(status) .. ")"
--end
-- Store ID in datastore
local ok, err = datastore:set("plugin_bunkernet_id", id)
if not ok then
return false, "can't save instance ID to the datastore : " .. err
end
-- Load databases
local ret = true
local i = 0
local db = {
ip = {}
}
f, err = io.open("/opt/bunkerweb/cache/bunkernet/ip.list", "r")
if not f then
ret = false
else
for line in f:lines() do
if utils.is_ipv4(line) and utils.ip_is_global(line) then
table.insert(db.ip, line)
i = i + 1
end
end
end
if not ret then
return false, "error while reading database : " .. err
end
f:close()
local ok, err = datastore:set("plugin_bunkernet_db", cjson.encode(db))
if not ok then
return false, "can't store BunkerNet database into datastore : " .. err
end
return true, "successfully connected to the BunkerNet service " .. self.server .. " with machine ID " .. id .. " and " .. tostring(i) .. " bad IPs in database"
end
function _M:request(method, url, data)
local httpc, err = http.new()
if not httpc then
return false, "can't instantiate http object : " .. err, nil, nil
end
local all_data = {
id = self.id,
integration = utils.get_integration(),
version = utils.get_version()
}
for k, v in pairs(data) do
all_data[k] = v
end
local res, err = httpc:request_uri(self.server .. url, {
method = method,
body = cjson.encode(all_data),
headers = {
["Content-Type"] = "application/json",
["User-Agent"] = "BunkerWeb/" .. utils.get_version()
}
})
httpc:close()
if not res then
return false, "error while sending request : " .. err, nil, nil
end
if res.status ~= 200 then
return false, "status code != 200", res.status, nil
end
local ok, ret = pcall(cjson.decode, res.body)
if not ok then
return false, "error while decoding json : " .. ret, nil, nil
end
return true, "success", res.status, ret
end
function _M:ping()
return self:request("GET", "/ping", {})
end
function _M:report(ip, reason, method, url, headers)
local data = {
ip = ip,
reason = reason,
method = method,
url = url,
headers = headers
}
return self:request("POST", "/report", data)
end
function _M:log(bypass_use_bunkernet)
if not bypass_use_bunkernet then
-- Check if BunkerNet is activated
local use_bunkernet = utils.get_variable("USE_BUNKERNET")
if use_bunkernet ~= "yes" then
return true, "bunkernet not activated"
end
end
-- Check if BunkerNet ID is generated
if not self.id then
return true, "bunkernet ID is not generated"
end
-- Check if IP has been blocked
local reason = utils.get_reason()
if not reason then
return true, "ip is not blocked"
end
if reason == "bunkernet" then
return true, "skipping report because the reason is bunkernet"
end
-- Check if IP is global
local is_global, err = utils.ip_is_global(ngx.var.remote_addr)
if is_global == nil then
return false, "error while checking if IP is global " .. err
end
if not is_global then
return true, "IP is not global"
end
-- Only report if it hasn't been reported for the same reason recently
--local reported = datastore:get("plugin_bunkernet_cache_" .. ngx.var.remote_addr .. reason)
--if reported then
--return true, "ip already reported recently"
--end
local function report_callback(premature, obj, ip, reason, method, url, headers)
local ok, err, status, data = obj:report(ip, reason, method, url, headers)
if status == 429 then
logger.log(ngx.WARN, "BUNKERNET", "BunkerNet API is rate limiting us")
elseif not ok then
logger.log(ngx.ERR, "BUNKERNET", "Can't report IP : " .. err)
else
logger.log(ngx.NOTICE, "BUNKERNET", "Successfully reported IP " .. ip .. " (reason : " .. reason .. ")")
--local ok, err = datastore:set("plugin_bunkernet_cache_" .. ip .. reason, true, 3600)
--if not ok then
--logger.log(ngx.ERR, "BUNKERNET", "Can't store cached report : " .. err)
--end
end
end
local hdr, err = ngx.timer.at(0, report_callback, self, ngx.var.remote_addr, reason, ngx.var.request_method, ngx.var.request_uri, ngx.req.get_headers())
if not hdr then
return false, "can't create report timer : " .. err
end
return true, "created report timer"
end
function _M:log_default()
-- Check if bunkernet is activated
local check, err = utils.has_variable("USE_BUNKERNET", "yes")
if check == nil then
return false, "error while checking variable USE_BUNKERNET (" .. err .. ")"
end
if not check then
return true, "bunkernet not enabled"
end
-- Check if default server is disabled
local check, err = utils.get_variable("DISABLE_DEFAULT_SERVER", false)
if check == nil then
return false, "error while getting variable DISABLE_DEFAULT_SERVER (" .. err .. ")"
end
if check ~= "yes" then
return true, "default server not disabled"
end
-- Call log method
return self:log(true)
end
function _M:access()
local use_bunkernet = utils.get_variable("USE_BUNKERNET")
if use_bunkernet ~= "yes" then
return true, "bunkernet not activated", false, nil
end
-- Check if BunkerNet ID is generated
if not self.id then
return true, "bunkernet ID is not generated"
end
local data, err = datastore:get("plugin_bunkernet_db")
if not data then
return false, "can't get bunkernet db : " .. err, false, nil
end
local db = cjson.decode(data)
for index, value in ipairs(db.ip) do
if value == ngx.var.remote_addr then
return true, "ip is in database", true, utils.get_deny_status()
end
end
return true, "ip is not in database", false, nil
end
function _M:api()
return false, nil, nil
end
return _M
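
For clarity, the wire format used by _M:request() above: every call merges the instance ID, integration and version into the JSON body. An equivalent report call sketched with Python's requests library (field names come from the Lua code; the server URL and the integration/version values are illustrative):

from requests import post

def bunkernet_report(server: str, instance_id: str, ip: str, reason: str,
                     method: str, url: str, headers: dict) -> dict:
    payload = {
        "id": instance_id,        # plugin_bunkernet_id
        "integration": "docker",  # utils.get_integration()
        "version": "1.4.3",       # utils.get_version()
        "ip": ip,
        "reason": reason,
        "method": method,
        "url": url,
        "headers": headers,
    }
    resp = post(
        server + "/report",
        json=payload,
        headers={"User-Agent": "BunkerWeb/1.4.3"},
        timeout=10,
    )
    resp.raise_for_status()  # the Lua code treats any status != 200 as an error
    return resp.json()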


@@ -1,126 +0,0 @@
#!/usr/bin/python3
from os import _exit, getenv, makedirs
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
sys_path.append("/opt/bunkerweb/core/bunkernet/jobs")
from bunkernet import data
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has BunkerNet activated
bunkernet_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(f"{first_server}_USE_BUNKERNET", getenv("USE_BUNKERNET"))
== "yes"
):
bunkernet_activated = True
break
# Singlesite case
elif getenv("USE_BUNKERNET") == "yes":
bunkernet_activated = True
if not bunkernet_activated:
logger.info("BunkerNet is not activated, skipping download...")
_exit(0)
# Create directory if it doesn't exist
makedirs("/opt/bunkerweb/cache/bunkernet", exist_ok=True)
# Check if ID is present
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
logger.error(
"Not downloading BunkerNet data because instance is not registered",
)
_exit(2)
# Don't go further if the cache is fresh
if is_cached_file("/opt/bunkerweb/cache/bunkernet/ip.list", "day"):
logger.info(
"BunkerNet list is already in cache, skipping download...",
)
_exit(0)
# Download data
logger.info("Downloading BunkerNet data ...")
ok, status, data = data()
if not ok:
logger.error(
f"Error while sending data request to BunkerNet API : {data}",
)
_exit(2)
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
_exit(0)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending db request : {data['data']}",
)
_exit(2)
logger.info("Successfully downloaded data from BunkerNet API")
# Writing data to file
logger.info("Saving BunkerNet data ...")
with open("/opt/bunkerweb/tmp/bunkernet-ip.list", "w") as f:
for ip in data["data"]:
f.write(f"{ip}\n")
# Check if file has changed
new_hash = file_hash("/opt/bunkerweb/tmp/bunkernet-ip.list")
old_hash = cache_hash("/opt/bunkerweb/cache/bunkernet/ip.list")
if new_hash == old_hash:
logger.info(
"New file is identical to cache file, reload is not needed",
)
_exit(0)
# Put file in cache
cached, err = cache_file(
"/opt/bunkerweb/tmp/bunkernet-ip.list",
"/opt/bunkerweb/cache/bunkernet/ip.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching BunkerNet data : {err}")
_exit(2)
# Update db
err = db.update_job_cache(
"bunkernet-data",
None,
"ip.list",
"\n".join(data["data"]).encode("utf-8"),
checksum=new_hash,
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
logger.info("Successfully saved BunkerNet data")
status = 1
except:
status = 2
logger.error(f"Exception while running bunkernet-data.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,137 +0,0 @@
#!/usr/bin/python3
from os import _exit, getenv, makedirs, remove
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from time import sleep
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
sys_path.append("/opt/bunkerweb/core/bunkernet/jobs")
from bunkernet import register, ping, get_id
from Database import Database
from logger import setup_logger
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has BunkerNet activated
bunkernet_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(f"{first_server}_USE_BUNKERNET", getenv("USE_BUNKERNET", "yes"))
== "yes"
):
bunkernet_activated = True
break
# Singlesite case
elif getenv("USE_BUNKERNET", "yes") == "yes":
bunkernet_activated = True
if not bunkernet_activated:
logger.info("BunkerNet is not activated, skipping registration...")
_exit(0)
# Create directory if it doesn't exist
makedirs("/opt/bunkerweb/cache/bunkernet", exist_ok=True)
# Ask an ID if needed
bunkernet_id = None
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
logger.info("Registering instance on BunkerNet API ...")
ok, status, data = register()
if not ok:
logger.error(
f"Error while sending register request to BunkerNet API : {data}"
)
_exit(1)
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
_exit(0)
elif status != 200:
logger.error(
f"Error {status} from BunkerNet API : {data['data']}",
)
_exit(1)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending register request : {data['data']}"
)
_exit(1)
bunkernet_id = data["data"]
logger.info(
f"Successfully registered on BunkerNet API with instance id {data['data']}"
)
else:
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "r") as f:
bunkernet_id = f.read()
logger.info(f"Already registered on BunkerNet API with instance id {get_id()}")
# Ping
logger.info("Checking connectivity with BunkerNet API ...")
bunkernet_ping = False
for i in range(0, 5):
ok, status, data = ping(bunkernet_id)
retry = False
if not ok:
logger.error(f"Error while sending ping request to BunkerNet API : {data}")
retry = True
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
retry = True
elif status == 401:
logger.warning(
"Instance ID is not registered, removing it and retrying a register later...",
)
remove("/opt/bunkerweb/cache/bunkernet/instance.id")
_exit(2)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending ping request : {data['data']}",
)
retry = True
if not retry:
bunkernet_ping = True
break
logger.warning("Waiting 1s and trying again ...")
sleep(1)
if bunkernet_ping:
logger.info("Connectivity with BunkerWeb is successful !")
status = 1
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "w") as f:
f.write(bunkernet_id)
# Update db
err = db.update_job_cache(
"bunkernet-register",
None,
"instance.id",
f"{bunkernet_id}".encode("utf-8"),
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
else:
logger.error("Connectivity with BunkerWeb failed ...")
status = 2
except:
status = 2
logger.error(f"Exception while running bunkernet-register.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,62 +0,0 @@
import requests, traceback
from os import getenv
def request(method, url, _id=None):
data = {"integration": get_integration(), "version": get_version()}
headers = {"User-Agent": f"BunkerWeb/{get_version()}"}
if _id is not None:
data["id"] = _id
try:
resp = requests.request(
method,
getenv("BUNKERNET_SERVER", "https://api.bunkerweb.io") + url,
json=data,
headers=headers,
timeout=5,
)
status = resp.status_code
if status == 429:
return True, 429, "rate limited"
raw_data = resp.json()
assert "result" in raw_data
assert "data" in raw_data
except Exception:
return False, None, traceback.format_exc()
return True, status, raw_data
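# Sketch of the expected happy path for callers of the helpers below,
# which all return the (ok, status, raw_data) triple from request():
#   ok, status, resp = register()
#   if ok and status == 200 and resp["result"] == "ok":
#       instance_id = resp["data"]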
def register():
return request("POST", "/register")
def ping(_id=None):
return request("GET", "/ping", _id=get_id() if _id is None else _id)
def data():
return request("GET", "/db", _id=get_id())
def get_id():
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "r") as f:
return f.read().strip()
def get_version():
with open("/opt/bunkerweb/VERSION", "r") as f:
return f.read().strip()
def get_integration():
try:
if getenv("KUBERNETES_MODE") == "yes":
return "kubernetes"
if getenv("SWARM_MODE") == "yes":
return "swarm"
with open("/etc/os-release", "r") as f:
if f.read().contains("Alpine"):
return "docker"
return "linux"
except:
return "unknown"

View File

@ -1,100 +0,0 @@
#!/usr/bin/python3
from os import getenv, makedirs
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
from Database import Database
from jobs import file_hash
from logger import setup_logger
logger = setup_logger("CUSTOM-CERT", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
def check_cert(cert_path, first_server: str = None):
try:
cache_path = (
f"/opt/bunkerweb/cache/customcert/{cert_path.replace('/', '_')}.hash"
)
current_hash = file_hash(cert_path)
if not isfile(cache_path):
with open(cache_path, "w") as f:
f.write(current_hash)
old_hash = file_hash(cache_path)
if old_hash == current_hash:
return False
with open(cache_path, "w") as f:
f.write(current_hash)
err = db.update_job_cache(
"custom-cert",
first_server,
f"{cert_path.replace('/', '_')}.hash",
current_hash.encode("utf-8"),
checksum=current_hash,
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
return True
except:
logger.error(
f"Exception while running custom-cert.py (check_cert) :\n{format_exc()}",
)
return False
status = 0
try:
makedirs("/opt/bunkerweb/cache/customcert/", exist_ok=True)
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(first_server + "_USE_CUSTOM_HTTPS", getenv("USE_CUSTOM_HTTPS"))
!= "yes"
):
continue
if first_server == "":
continue
cert_path = getenv(first_server + "_CUSTOM_HTTPS_CERT")
logger.info(
f"Checking if certificate {cert_path} changed ...",
)
need_reload = check_cert(cert_path, first_server)
if need_reload:
logger.info(
f"Detected change for certificate {cert_path}",
)
status = 1
else:
logger.info(
f"No change for certificate {cert_path}",
)
# Singlesite case
elif getenv("USE_CUSTOM_HTTPS") == "yes" and getenv("SERVER_NAME") != "":
cert_path = getenv("CUSTOM_HTTPS_CERT")
logger.info(f"Checking if certificate {cert_path} changed ...")
need_reload = check_cert(cert_path)
if need_reload:
logger.info(f"Detected change for certificate {cert_path}")
status = 1
else:
logger.info(f"No change for certificate {cert_path}")
except:
status = 2
logger.error(f"Exception while running custom-cert.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,47 +0,0 @@
{% if ERRORS != "" %}
{% for element in ERRORS.split(" ") %}
{% set code = element.split("=")[0] %}
{% set page = element.split("=")[1] %}
error_page {{ code }} {{ page }};
location = {{ page }} {
root {% if ROOT_FOLDER == "" %}/opt/bunkerweb/www/{% if MULTISITE == "yes" %}{{ SERVER_NAME.split(" ")[0] }}{% endif %}{% else %}{{ ROOT_FOLDER }}{% endif %};
modsecurity off;
internal;
}
{% endfor %}
{% endif %}
{% set default_errors = ["400", "401", "403", "404", "405", "413", "429", "500", "501", "502", "503", "504"] %}
{% for default_error in default_errors %}
{% if not default_error + "=" in ERRORS +%}
{% if default_error == "405" +%}
error_page 405 =200 @405;
{% else +%}
error_page {{ default_error }} @{{ default_error }};
{% endif +%}
location @{{ default_error }} {
auth_basic off;
internal;
modsecurity off;
default_type 'text/html';
content_by_lua_block {
local logger = require "logger"
local errors = require "errors.errors"
local html, err
if ngx.status == 200 then
html, err = errors.error_html(tostring(405))
else
html, err = errors.error_html(tostring(ngx.status))
end
if not html then
logger.log(ngx.ERR, "ERRORS", "Error while computing HTML error template for {{ default_error }} : " .. err)
else
ngx.say(html)
end
}
}
{% endif %}
{% endfor %}

View File

@ -1,124 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:init()
-- Save default errors into datastore
local default_errors = {
["400"] = {
title = "400 - Bad Request",
body1 = "Bad Request",
body2 = "400",
body3 = "The server did not understand the request."
},
["401"] = {
title = "401 - Not Authorized",
body1 = "Not Authorized",
body2 = "401",
body3 = "Valid authentication credentials needed for the target resource."
},
["403"] = {
title = "403 - Forbidden",
body1 = "Forbidden",
body2 = "403",
body3 = "Access is forbidden to the requested page."
},
["404"] = {
title = "404 - Not Found",
body1 = "Not Found",
body2 = "404",
body3 = "The server cannot find the requested page."
},
["405"] = {
title = "405 - Method Not Allowed",
body1 = "Method Not Allowed",
body2 = "405",
body3 = "The method specified in the request is not allowed."
},
["413"] = {
title = "413 - Request Entity Too Large",
body1 = "Request Entity Too Large",
body2 = "413",
body3 = "The server will not accept the request, because the request entity is too large."
},
["429"] = {
title = "429 - Too Many Requests",
body1 = "Too Many Requests",
body2 = "429",
body3 = "Too many requests sent in a given amount of time, try again later."
},
["500"] = {
title = "500 - Internal Server Error",
body1 = "Internal Server Error",
body2 = "500",
body3 = "The request was not completed. The server met an unexpected condition."
},
["501"] = {
title = "501 - Not Implemented",
body1 = "Not Implemented",
body2 = "501",
body3 = "The request was not completed. The server did not support the functionality required."
},
["502"] = {
title = "502 - Bad Gateway",
body1 = "Bad Gateway",
body2 = "502",
body3 = "The request was not completed. The server received an invalid response from the upstream server."
},
["503"] = {
title = "503 - Service Unavailable",
body1 = "Service Unavailable",
body2 = "503",
body3 = "The request was not completed. The server is temporarily overloading or down."
},
["504"] = {
title = "504 - Gateway Timeout",
body1 = "Gateway Timeout",
body2 = "504",
body3 = "The gateway has timed out."
}
}
local ok, err = datastore:set("plugin_errors_default_errors", cjson.encode(default_errors))
if not ok then
return false, "can't save default errors to datastore : " .. err
end
-- Save generic template into datastore
local f, err = io.open("/opt/bunkerweb/core/errors/files/error.html", "r")
if not f then
return false, "can't open error.html : " .. err
end
local template = f:read("*all")
f:close()
local ok, err = datastore:set("plugin_errors_template", template)
if not ok then
return false, "can't save error.html to datastore : " .. err
end
return true, "success"
end
function _M.error_html(code)
-- Load default errors texts
local default_errors, err = datastore:get("plugin_errors_default_errors")
if not default_errors then
return false, "can't get default errors from datastore : " .. err
end
default_errors = cjson.decode(default_errors)
-- Load template
local template, err = datastore:get("plugin_errors_template")
if not template then
return false, "can't get template from datastore : " .. err
end
-- Compute template
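-- the template is expected to contain four %s placeholders, filled in order with title, body1, body2 and body3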
return template:format(default_errors[code].title, default_errors[code].body1, default_errors[code].body2, default_errors[code].body3), "success"
end
return _M

View File

@ -1,247 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
local ipmatcher = require "resty.ipmatcher"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:init()
-- Check if init is needed
local init_needed, err = utils.has_variable("USE_GREYLIST", "yes")
if init_needed == nil then
return false, err
end
if not init_needed then
return true, "no service uses Greylist, skipping init"
end
-- Read greylists
local greylists = {
["IP"] = {},
["RDNS"] = {},
["ASN"] = {},
["USER_AGENT"] = {},
["URI"] = {}
}
local i = 0
for kind, _ in pairs(greylists) do
local f, err = io.open("/opt/bunkerweb/cache/greylist/" .. kind .. ".list", "r")
if f then
for line in f:lines() do
table.insert(greylists[kind], line)
i = i + 1
end
f:close()
end
end
-- Load them into datastore
local ok, err = datastore:set("plugin_greylist_list", cjson.encode(greylists))
if not ok then
return false, "can't store Greylist list into datastore : " .. err
end
return true, "successfully loaded " .. tostring(i) .. " greylisted IP/network/rDNS/ASN/User-Agent/URI"
end
function _M:access()
-- Check if access is needed
local access_needed, err = utils.get_variable("USE_GREYLIST")
if access_needed == nil then
return false, err, false, nil
end
if access_needed ~= "yes" then
return true, "Greylist not activated", false, nil
end
-- Check the cache
local cached_ip, err = self:is_in_cache("ip" .. ngx.var.remote_addr)
if cached_ip and cached_ip ~= "ok" then
return true, "IP is in greylist cache (info = " .. cached_ip .. ")", false, ngx.OK
end
local cached_uri, err = self:is_in_cache("uri" .. ngx.var.uri)
if cached_uri and cached_uri ~= "ok" then
return true, "URI is in greylist cache (info = " .. cached_uri .. ")", false, ngx.OK
end
local cached_ua = true
if ngx.var.http_user_agent then
cached_ua, err = self:is_in_cache("ua" .. ngx.var.http_user_agent)
if cached_ua and cached_ua ~= "ok" then
return true, "User-Agent is in greylist cache (info = " .. cached_ua .. ")", false, ngx.OK
end
end
if cached_ip and cached_uri and cached_ua then
return true, "full request is in greylist cache (not greylisted)", false, nil
end
-- Get list
local data, err = datastore:get("plugin_greylist_list")
if not data then
return false, "can't get Greylist list : " .. err, false, nil
end
local ok, greylists = pcall(cjson.decode, data)
if not ok then
return false, "error while decoding greylists : " .. greylists, false, nil
end
-- Return value
local ret, ret_err = true, "success"
-- Check if IP is in IP/net greylist
local ip_net, err = utils.get_variable("GREYLIST_IP")
if ip_net and ip_net ~= "" then
for element in ip_net:gmatch("%S+") do
table.insert(greylists["IP"], element)
end
end
if not cached_ip then
local ipm, err = ipmatcher.new(greylists["IP"])
if not ipm then
ret = false
ret_err = "can't instantiate ipmatcher " .. err
else
if ipm:match(ngx.var.remote_addr) then
self:add_to_cache("ip" .. ngx.var.remote_addr, "ip/net")
return ret, "client IP " .. ngx.var.remote_addr .. " is in greylist", false, ngx.OK
end
end
end
-- Check if rDNS is in greylist
local rdns_global, err = utils.get_variable("GREYLIST_RDNS_GLOBAL")
local check = true
if not rdns_global then
logger.log(ngx.ERR, "GREYLIST", "Error while getting GREYLIST_RDNS_GLOBAL variable : " .. err)
elseif rdns_global == "yes" then
check, err = utils.ip_is_global(ngx.var.remote_addr)
if check == nil then
logger.log(ngx.ERR, "GREYLIST", "Error while getting checking if IP is global : " .. err)
end
end
if not cached_ip and check then
local rdns, err = utils.get_rdns(ngx.var.remote_addr)
if not rdns then
ret = false
ret_err = "error while trying to get reverse dns : " .. err
else
local rdns_list, err = utils.get_variable("GREYLIST_RDNS")
if rdns_list and rdns_list ~= "" then
for element in rdns_list:gmatch("%S+") do
table.insert(greylists["RDNS"], element)
end
end
for i, suffix in ipairs(greylists["RDNS"]) do
if rdns:sub(- #suffix) == suffix then
self:add_to_cache("ip" .. ngx.var.remote_addr, "rDNS " .. suffix)
return ret, "client IP " .. ngx.var.remote_addr .. " is in greylist (info = rDNS " .. suffix .. ")", false, ngx.OK
end
end
end
end
-- Check if ASN is in greylist
if not cached_ip then
if utils.ip_is_global(ngx.var.remote_addr) then
local asn, err = utils.get_asn(ngx.var.remote_addr)
if not asn then
ret = false
ret_err = "error while trying to get asn number : " .. err
else
local asn_list, err = utils.get_variable("GREYLIST_ASN")
if asn_list and asn_list ~= "" then
for element in asn_list:gmatch("%S+") do
table.insert(greylists["ASN"], element)
end
end
for i, asn_bl in ipairs(greylists["ASN"]) do
if tostring(asn) == asn_bl then
self:add_to_cache("ip" .. ngx.var.remote_addr, "ASN " .. tostring(asn))
return ret, "client IP " .. ngx.var.remote_addr .. " is in greylist (kind = ASN " .. tostring(asn) .. ")", false,
ngx.OK
end
end
end
end
end
-- IP is not greylisted
local ok, err = self:add_to_cache("ip" .. ngx.var.remote_addr, "ok")
if not ok then
ret = false
ret_err = err
end
-- Check if User-Agent is in greylist
if not cached_ua and ngx.var.http_user_agent then
local ua_list, err = utils.get_variable("GREYLIST_USER_AGENT")
if ua_list and ua_list ~= "" then
for element in ua_list:gmatch("%S+") do
table.insert(greylists["USER_AGENT"], element)
end
end
for i, ua_bl in ipairs(greylists["USER_AGENT"]) do
if ngx.var.http_user_agent:match(ua_bl) then
self:add_to_cache("ua" .. ngx.var.http_user_agent, "UA " .. ua_bl)
return ret, "client User-Agent " .. ngx.var.http_user_agent .. " is in greylist (matched " .. ua_bl .. ")", false,
ngx.OK
end
end
-- UA is not greylisted
local ok, err = self:add_to_cache("ua" .. ngx.var.http_user_agent, "ok")
if not ok then
ret = false
ret_err = err
end
end
-- Check if URI is in greylist
if not cached_uri then
local uri_list, err = utils.get_variable("GREYLIST_URI")
if uri_list and uri_list ~= "" then
for element in uri_list:gmatch("%S+") do
table.insert(greylists["URI"], element)
end
end
for i, uri_bl in ipairs(greylists["URI"]) do
if ngx.var.uri:match(uri_bl) then
self:add_to_cache("uri" .. ngx.var.uri, "URI " .. uri_bl)
return ret, "client URI " .. ngx.var.uri .. " is in greylist (matched " .. uri_bl .. ")", false, ngx.OK
end
end
end
-- URI is not greylisted
local ok, err = self:add_to_cache("uri" .. ngx.var.uri, "ok")
if not ok then
ret = false
ret_err = err
end
return ret, "IP is not in list (error = " .. ret_err .. ")", true, utils.get_deny_status()
end
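-- Cache entries are keyed per virtual host ("plugin_greylist_cache_" .. server_name .. element) and hold either the matching info or "ok", with a one hour TTL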
function _M:is_in_cache(ele)
local kind, err = datastore:get("plugin_greylist_cache_" .. ngx.var.server_name .. ele)
if not kind then
if err ~= "not found" then
logger.log(ngx.ERR, "GREYLIST", "Error while accessing cache : " .. err)
end
return false, err
end
return kind, "success"
end
function _M:add_to_cache(ele, kind)
local ok, err = datastore:set("plugin_greylist_cache_" .. ngx.var.server_name .. ele, kind, 3600)
if not ok then
logger.log(ngx.ERR, "GREYLIST", "Error while adding element to cache : " .. err)
return false, err
end
return true, "success"
end
return _M

View File

@ -1,186 +0,0 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from re import match
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
from requests import get
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, is_cached_file, file_hash
def check_line(kind, line):
if kind == "IP":
if "/" in line:
try:
ip_network(line)
return True, line
except ValueError:
pass
else:
try:
ip_address(line)
return True, line
except ValueError:
pass
return False, ""
elif kind == "RDNS":
if match(r"^(\.?[A-Za-z0-9\-]+)*\.[A-Za-z]{2,}$", line):
return True, line.lower()
return False, ""
elif kind == "ASN":
real_line = line.replace("AS", "")
if match(r"^\d+$", real_line):
return True, real_line
elif kind == "USER_AGENT":
return True, line.replace("\\ ", " ").replace("\\.", "%.").replace(
"\\\\", "\\"
).replace("-", "%-")
elif kind == "URI":
if match(r"^/", line):
return True, line
return False, ""
logger = setup_logger("GREYLIST", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has Greylist activated
greylist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if getenv(first_server + "_USE_GREYLIST", getenv("USE_GREYLIST")) == "yes":
greylist_activated = True
break
# Singlesite case
elif getenv("USE_GREYLIST") == "yes":
greylist_activated = True
if not greylist_activated:
logger.info("Greylist is not activated, skipping downloads...")
_exit(0)
# Create directories if they don't exist
makedirs("/opt/bunkerweb/cache/greylist", exist_ok=True)
makedirs("/opt/bunkerweb/tmp/greylist", exist_ok=True)
# Don't go further if the cache is fresh
kinds_fresh = {
"IP": True,
"RDNS": True,
"ASN": True,
"USER_AGENT": True,
"URI": True,
}
all_fresh = True
for kind in kinds_fresh:
if not is_cached_file(f"/opt/bunkerweb/cache/greylist/{kind}.list", "hour"):
kinds_fresh[kind] = False
all_fresh = False
logger.info(
f"Greylist for {kind} is not cached, processing downloads...",
)
else:
logger.info(
f"Greylist for {kind} is already in cache, skipping downloads...",
)
if all_fresh:
_exit(0)
# Get URLs
urls = {
"IP": [],
"RDNS": [],
"ASN": [],
"USER_AGENT": [],
"URI": [],
}
for kind in urls:
for url in getenv(f"GREYLIST_{kind}_URLS", "").split(" "):
if url != "" and url not in urls[kind]:
urls[kind].append(url)
# Loop on kinds
for kind, urls_list in urls.items():
if kinds_fresh[kind]:
continue
# Combine data from every URL of the kind in memory, then write it once to a temp file
i = 0
content = ""
for url in urls_list:
try:
logger.info(f"Downloading greylist data from {url} ...")
resp = get(url)
if resp.status_code != 200:
continue
for line in resp.content.decode("utf-8").splitlines():
line = line.strip()
if kind != "USER_AGENT":
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
ok, data = check_line(kind, line)
if ok:
content += data + "\n"
i += 1
except:
status = 2
logger.error(
f"Exception while getting greylist from {url} :\n{format_exc()}"
)
logger.info(f"Downloaded {i} greylisted {kind}")
with open(f"/opt/bunkerweb/tmp/greylist/{kind}.list", "w") as f:
f.write(content)
# Check if file has changed
new_hash = file_hash(f"/opt/bunkerweb/tmp/greylist/{kind}.list")
old_hash = cache_hash(f"/opt/bunkerweb/cache/greylist/{kind}.list")
if new_hash == old_hash:
logger.info(
f"New file {kind}.list is identical to cache file, reload is not needed",
)
else:
logger.info(
f"New file {kind}.list is different than cache file, reload is needed",
)
# Put file in cache
cached, err = cache_file(
f"/opt/bunkerweb/tmp/greylist/{kind}.list",
f"/opt/bunkerweb/cache/greylist/{kind}.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching greylist : {err}")
status = 2
if status != 2:
# Update db with the combined content
err = db.update_job_cache(
"greylist-download",
None,
f"{kind}.list",
content.encode("utf-8"),
checksum=new_hash,
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
status = 1
except:
status = 2
logger.error(f"Exception while running greylist-download.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,100 +0,0 @@
#!/usr/bin/python3
from io import BytesIO
from os import getenv, makedirs, chmod, stat, _exit
from os.path import isfile, dirname
from stat import S_IEXEC
from sys import exit as sys_exit, path as sys_path
from uuid import uuid4
from glob import glob
from json import loads
from shutil import copytree, rmtree
from traceback import format_exc
from zipfile import ZipFile
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from requests import get
from logger import setup_logger
logger = setup_logger("Jobs", getenv("LOG_LEVEL", "INFO"))
status = 0
def install_plugin(plugin_dir):
# Load plugin.json
metadata = {}
with open(f"{plugin_dir}plugin.json", "r") as f:
metadata = loads(f.read())
# Don't go further if plugin is already installed
if isfile(f"/data/plugins/{metadata['id']}/plugin.json"):
logger.info(
f"Skipping installation of plugin {metadata['id']} (already installed)",
)
return
# Copy the plugin
copytree(plugin_dir, f"/data/plugins/{metadata['id']}")
# Add u+x permissions to the installed jobs files
for job_file in glob(f"/data/plugins/{metadata['id']}/jobs/*"):
st = stat(job_file)
chmod(job_file, st.st_mode | S_IEXEC)
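# A plugin archive is expected to contain one folder per plugin, each with a
# plugin.json holding at least an "id" field (used as the install path under
# /data/plugins) and an optional jobs/ folder with executable scripts.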
try:
# Check if we have plugins to download
plugin_urls = getenv("EXTERNAL_PLUGIN_URLS", "")
if plugin_urls == "":
logger.info("No external plugins to download")
_exit(0)
# Loop on URLs
for plugin_url in plugin_urls.split(" "):
# Download ZIP file
try:
req = get(plugin_url)
except:
logger.error(
f"Exception while downloading plugin(s) from {plugin_url} :",
)
print(format_exc())
status = 2
continue
# Extract it to tmp folder
temp_dir = "/opt/bunkerweb/tmp/plugins-" + str(uuid4()) + "/"
try:
makedirs(temp_dir, exist_ok=True)
with ZipFile(BytesIO(req.content)) as zf:
zf.extractall(path=temp_dir)
except:
logger.error(
f"Exception while decompressing plugin(s) from {plugin_url} :",
)
print(format_exc())
status = 2
continue
# Install plugins
try:
for plugin_dir in glob(temp_dir + "**/plugin.json", recursive=True):
install_plugin(dirname(plugin_dir) + "/")
except:
logger.error(
f"Exception while installing plugin(s) from {plugin_url} :",
)
print(format_exc())
status = 2
continue
except:
status = 2
logger.error(f"Exception while running download-plugins.py :\n{format_exc()}")
for plugin_tmp in glob("/opt/bunkerweb/tmp/plugins-*/"):
rmtree(plugin_tmp)
sys_exit(status)

View File

@ -1,85 +0,0 @@
#!/usr/bin/python3
from datetime import date
from gzip import decompress
from os import _exit, getenv
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
from maxminddb import open_database
from requests import get
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file
logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Don't go further if the cache is fresh
if is_cached_file("/opt/bunkerweb/cache/asn.mmdb", "month"):
logger.info("asn.mmdb is already in cache, skipping download...")
_exit(0)
# Compute the mmdb URL
today = date.today()
mmdb_url = "https://download.db-ip.com/free/dbip-asn-lite-{}-{}.mmdb.gz".format(
today.strftime("%Y"), today.strftime("%m")
)
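# e.g. https://download.db-ip.com/free/dbip-asn-lite-2022-11.mmdb.gz for November 2022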
# Download the mmdb file
logger.info(f"Downloading mmdb file from url {mmdb_url} ...")
resp = get(mmdb_url)
# Save it to temp
logger.info("Saving mmdb file to tmp ...")
with open("/opt/bunkerweb/tmp/asn.mmdb", "wb") as f:
f.write(decompress(resp.content))
# Try to load it
logger.info("Checking if mmdb file is valid ...")
with open_database("/opt/bunkerweb/tmp/asn.mmdb") as reader:
pass
# Check if file has changed
new_hash = file_hash("/opt/bunkerweb/tmp/asn.mmdb")
old_hash = cache_hash("/opt/bunkerweb/cache/asn.mmdb")
if new_hash == old_hash:
logger.info("New file is identical to cache file, reload is not needed")
_exit(0)
# Move it to cache folder
logger.info("Moving mmdb file to cache ...")
cached, err = cache_file(
"/opt/bunkerweb/tmp/asn.mmdb", "/opt/bunkerweb/cache/asn.mmdb", new_hash
)
if not cached:
logger.error(f"Error while caching mmdb file : {err}")
_exit(2)
# Update db
err = db.update_job_cache(
"mmdb-asn", None, "asn.mmdb", decompress(resp.content), checksum=new_hash
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
# Success
logger.info(f"Downloaded new mmdb from {mmdb_url}")
status = 1
except:
status = 2
logger.error(f"Exception while running mmdb-asn.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,87 +0,0 @@
#!/usr/bin/python3
from datetime import date
from gzip import decompress
from os import _exit, getenv
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")
from requests import get
from maxminddb import open_database
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file
logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Don't go further if the cache is fresh
if is_cached_file("/opt/bunkerweb/cache/country.mmdb", "month"):
logger.info("country.mmdb is already in cache, skipping download...")
_exit(0)
# Compute the mmdb URL
today = date.today()
mmdb_url = "https://download.db-ip.com/free/dbip-country-lite-{}-{}.mmdb.gz".format(
today.strftime("%Y"), today.strftime("%m")
)
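# e.g. https://download.db-ip.com/free/dbip-country-lite-2022-11.mmdb.gz for November 2022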
# Download the mmdb file
logger.info(f"Downloading mmdb file from url {mmdb_url} ...")
resp = get(mmdb_url)
# Save it to temp
logger.info("Saving mmdb file to tmp ...")
with open("/opt/bunkerweb/tmp/country.mmdb", "wb") as f:
f.write(decompress(resp.content))
# Try to load it
logger.info("Checking if mmdb file is valid ...")
with open_database("/opt/bunkerweb/tmp/country.mmdb") as reader:
pass
# Check if file has changed
new_hash = file_hash("/opt/bunkerweb/tmp/country.mmdb")
old_hash = cache_hash("/opt/bunkerweb/cache/country.mmdb")
if new_hash == old_hash:
logger.info("New file is identical to cache file, reload is not needed")
_exit(0)
# Move it to cache folder
logger.info("Moving mmdb file to cache ...")
cached, err = cache_file(
"/opt/bunkerweb/tmp/country.mmdb",
"/opt/bunkerweb/cache/country.mmdb",
new_hash,
)
if not cached:
logger.error(f"Error while caching mmdb file : {err}")
_exit(2)
# Update db
err = db.update_job_cache(
"mmdb-country", None, "country.mmdb", decompress(resp.content), checksum=new_hash
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
# Success
logger.info(f"Downloaded new mmdb from {mmdb_url}")
status = 1
except:
status = 2
logger.error(f"Exception while running mmdb-country.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,4 +0,0 @@
# set location for challenges
location ~ ^/.well-known/acme-challenge/ {
root /opt/bunkerweb/tmp/lets-encrypt;
}

View File

@ -1,24 +0,0 @@
# set location for challenges
location ~ ^/.well-known/acme-challenge/ {
root /opt/bunkerweb/tmp/lets-encrypt;
}
{% if AUTO_LETS_ENCRYPT == "yes" %}
# listen on HTTPS PORT
listen 0.0.0.0:{{ HTTPS_PORT }} ssl {% if HTTP2 == "yes" %}http2{% endif %} {% if USE_PROXY_PROTOCOL == "yes" %}proxy_protocol{% endif %};
# TLS config
ssl_certificate /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/privkey.pem;
ssl_protocols {{ HTTPS_PROTOCOLS }};
ssl_prefer_server_ciphers on;
ssl_session_tickets off;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
{% if "TLSv1.2" in HTTPS_PROTOCOLS +%}
ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
{% endif %}

View File

@ -1,63 +0,0 @@
#!/usr/bin/python3
import sys, os, traceback
sys.path.append("/opt/bunkerweb/deps/python")
sys.path.append("/opt/bunkerweb/utils")
sys.path.append("/opt/bunkerweb/api")
from logger import setup_logger
from API import API
logger = setup_logger("Lets-encrypt", os.getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Get env vars
is_kubernetes_mode = os.getenv("KUBERNETES_MODE") == "yes"
is_swarm_mode = os.getenv("SWARM_MODE") == "yes"
is_autoconf_mode = os.getenv("AUTOCONF_MODE") == "yes"
token = os.getenv("CERTBOT_TOKEN")
validation = os.getenv("CERTBOT_VALIDATION")
# Cluster case
if is_kubernetes_mode or is_swarm_mode or is_autoconf_mode:
for variable, value in os.environ.items():
if not variable.startswith("CLUSTER_INSTANCE_"):
continue
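# CLUSTER_INSTANCE_* values are expected to look like "<endpoint> <host>"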
endpoint = value.split(" ")[0]
host = value.split(" ")[1]
api = API(endpoint, host=host)
sent, err, api_status, resp = api.request(
"POST",
"/lets-encrypt/challenge",
data={"token": token, "validation": validation},
)
if not sent:
status = 1
logger.error(
f"Can't send API request to {api.get_endpoint()}/lets-encrypt/challenge : {err}"
)
else:
if api_status != 200:
status = 1
logger.error(
f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/challenge : status = {resp['status']}, msg = {resp['msg']}",
)
else:
logger.info(
f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/challenge",
)
# Docker or Linux case
else:
root_dir = "/opt/bunkerweb/tmp/lets-encrypt/.well-known/acme-challenge/"
os.makedirs(root_dir, exist_ok=True)
with open(root_dir + token, "w") as f:
f.write(validation)
except:
status = 1
logger.error("Exception while running certbot-auth.py :")
print(traceback.format_exc())
sys.exit(status)

View File

@ -1,61 +0,0 @@
#!/usr/bin/python3
import sys, os, traceback
sys.path.append("/opt/bunkerweb/deps/python")
sys.path.append("/opt/bunkerweb/utils")
sys.path.append("/opt/bunkerweb/api")
from logger import setup_logger
from API import API
logger = setup_logger("Lets-encrypt", os.getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Get env vars
is_kubernetes_mode = os.getenv("KUBERNETES_MODE") == "yes"
is_swarm_mode = os.getenv("SWARM_MODE") == "yes"
is_autoconf_mode = os.getenv("AUTOCONF_MODE") == "yes"
token = os.getenv("CERTBOT_TOKEN")
# Cluster case
if is_kubernetes_mode or is_swarm_mode or is_autoconf_mode:
for variable, value in os.environ.items():
if not variable.startswith("CLUSTER_INSTANCE_"):
continue
endpoint = value.split(" ")[0]
host = value.split(" ")[1]
api = API(endpoint, host=host)
sent, err, api_status, resp = api.request(
"DELETE", "/lets-encrypt/challenge", data={"token": token}
)
if not sent:
status = 1
logger.error(
f"Can't send API request to {api.get_endpoint()}/lets-encrypt/challenge : {err}"
)
else:
if api_status != 200:
status = 1
logger.error(
f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/challenge : status = {resp['status']}, msg = {resp['msg']}",
)
else:
logger.info(
f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/challenge",
)
# Docker or Linux case
else:
challenge_path = (
f"/opt/bunkerweb/tmp/lets-encrypt/.well-known/acme-challenge/{token}"
)
if os.path.isfile(challenge_path):
os.remove(challenge_path)
except:
status = 1
logger.error("Exception while running certbot-cleanup.py :")
print(traceback.format_exc())
sys.exit(status)

View File

@ -1,144 +0,0 @@
#!/usr/bin/python3
from io import BytesIO
from os import environ, getenv
from os.path import exists
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from tarfile import open as tar_open
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
from docker import DockerClient
from logger import setup_logger
from API import API
logger = setup_logger("Lets-encrypt", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Get env vars
bw_integration = None
if getenv("KUBERNETES_MODE") == "yes":
bw_integration = "Swarm"
elif getenv("SWARM_MODE") == "yes":
bw_integration = "Kubernetes"
elif getenv("AUTOCONF_MODE") == "yes":
bw_integration = "Autoconf"
elif exists("/opt/bunkerweb/INTEGRATION"):
with open("/opt/bunkerweb/INTEGRATION", "r") as f:
bw_integration = f.read().strip()
token = getenv("CERTBOT_TOKEN")
# Cluster case
if bw_integration in ("Swarm", "Kubernetes", "Autoconf"):
# Create tarball of /data/cache/letsencrypt
tgz = BytesIO()
with tar_open(mode="w:gz", fileobj=tgz) as tf:
tf.add("/data/cache/letsencrypt", arcname=".")
tgz.seek(0, 0)
files = {"archive.tar.gz": tgz}
for variable, value in environ.items():
if not variable.startswith("CLUSTER_INSTANCE_"):
continue
endpoint = value.split(" ")[0]
host = value.split(" ")[1]
api = API(endpoint, host=host)
sent, err, api_status, resp = api.request(
"POST", "/lets-encrypt/certificates", files=files
)
if not sent:
status = 1
logger.error(
f"Can't send API request to {api.get_endpoint()}/lets-encrypt/certificates : {err}"
)
else:
if api_status != 200:
status = 1
logger.error(
f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/certificates : status = {resp['status']}, msg = {resp['msg']}"
)
else:
logger.info(
f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/certificates",
)
sent, err, status, resp = api.request("POST", "/reload")
if not sent:
status = 1
logger.error(
f"Can't send API request to {api.get_endpoint()}/reload : {err}"
)
else:
if api_status != 200:
status = 1
logger.error(
f"Error while sending API request to {api.get_endpoint()}/reload : status = {resp['status']}, msg = {resp['msg']}"
)
else:
logger.info(
f"Successfully sent API request to {api.get_endpoint()}/reload"
)
# Docker case
elif bw_integration == "Docker":
docker_client = DockerClient(
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
)
apis = []
for instance in docker_client.containers.list(
filters={"label": "bunkerweb.INSTANCE"}
):
api = None
for var in instance.attrs["Config"]["Env"]:
if var.startswith("API_HTTP_PORT="):
api = API(
f"http://{instance.name}:{var.replace('API_HTTP_PORT=', '', 1)}"
)
break
if api:
apis.append(api)
else:
apis.append(
API(f"http://{instance.name}:{getenv('API_HTTP_PORT', '5000')}")
)
for api in apis:
sent, err, status, resp = api.request("POST", "/reload")
if not sent:
status = 1
logger.error(
f"Can't send API request to {api.get_endpoint()}/reload : {err}"
)
else:
if api_status != 200:
status = 1
logger.error(
f"Error while sending API request to {api.get_endpoint()}/reload : status = {resp['status']}, msg = {resp['msg']}"
)
else:
logger.info(
f"Successfully sent API request to {api.get_endpoint()}/reload"
)
elif bw_integration == "Linux":
cmd = "/usr/sbin/nginx -s reload"
proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
if proc.returncode != 0:
status = 1
logger.error("Error while reloading nginx")
else:
logger.info("Successfully reloaded nginx")
except:
status = 1
logger.error(f"Exception while running certbot-deploy.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,96 +0,0 @@
#!/usr/bin/python3
from os import environ, getenv
from os.path import exists
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from logger import setup_logger
def certbot_new(domains, email):
cmd = f"/opt/bunkerweb/deps/python/bin/certbot certonly --manual --preferred-challenges=http --manual-auth-hook /opt/bunkerweb/core/letsencrypt/jobs/certbot-auth.py --manual-cleanup-hook /opt/bunkerweb/core/letsencrypt/jobs/certbot-cleanup.py -n -d {domains} --email {email} --agree-tos"
if getenv("USE_LETS_ENCRYPT_STAGING") == "yes":
cmd += " --staging"
environ["PYTHONPATH"] = "/opt/bunkerweb/deps/python"
proc = run(
cmd.split(" "),
stdin=DEVNULL,
stderr=STDOUT,
env=environ,
)
return proc.returncode
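# As an illustration (hypothetical values), certbot_new("example.com,www.example.com", "admin@example.com")
# runs certbot certonly with the auth/cleanup hooks above and
# -d example.com,www.example.com --email admin@example.com --agree-tos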
logger = setup_logger("LETS-ENCRYPT", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(f"{first_server}_AUTO_LETS_ENCRYPT", getenv("AUTO_LETS_ENCRYPT"))
!= "yes"
):
continue
if first_server == "":
continue
real_server_name = getenv(f"{first_server}_SERVER_NAME", first_server)
domains = real_server_name.replace(" ", ",")
if exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
logger.info(
f"Certificates already exist for domain(s) {domains}",
)
continue
real_email = getenv(
f"{first_server}_EMAIL_LETS_ENCRYPT",
getenv("EMAIL_LETS_ENCRYPT", f"contact@{first_server}"),
)
if real_email == "":
real_email = f"contact@{first_server}"
logger.info(
f"Asking certificates for domains : {domains} (email = {real_email}) ...",
)
if certbot_new(domains, real_email) != 0:
status = 1
logger.error(
f"Certificate generation failed for domain(s) {domains} ...",
)
else:
logger.info(
f"Certificate generation succeeded for domain(s) : {domains}"
)
# Singlesite case
elif getenv("AUTO_LETS_ENCRYPT") == "yes" and getenv("SERVER_NAME") != "":
first_server = getenv("SERVER_NAME").split(" ")[0]
domains = getenv("SERVER_NAME").replace(" ", ",")
if exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
logger.info(f"Certificates already exists for domain(s) {domains}")
else:
real_email = getenv("EMAIL_LETS_ENCRYPT", f"contact@{first_server}")
if real_email == "":
real_email = f"contact@{first_server}"
logger.info(
f"Asking certificates for domain(s) : {domains} (email = {real_email}) ...",
)
if certbot_new(domains, real_email) != 0:
status = 2
logger.error(f"Certificate generation failed for domain(s) : {domains}")
else:
logger.info(
f"Certificate generation succeeded for domain(s) : {domains}"
)
except:
status = 1
logger.error(f"Exception while running certbot-new.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,72 +0,0 @@
#!/usr/bin/python3
from os import environ, getenv
from os.path import exists
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from logger import setup_logger
def renew(domain):
cmd = f"/opt/bunkerweb/deps/python/bin/certbot renew --cert-name {domain} --deploy-hook /opt/bunkerweb/core/letsencrypt/jobs/certbot-deploy.py"
environ["PYTHONPATH"] = "/opt/bunkerweb/deps/python"
proc = run(
cmd.split(" "),
stdin=DEVNULL,
stderr=STDOUT,
env=environ,
)
return proc.returncode
logger = setup_logger("LETS-ENCRYPT", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if first_server == "":
continue
if (
getenv(f"{first_server}_AUTO_LETS_ENCRYPT", getenv("AUTO_LETS_ENCRYPT"))
!= "yes"
):
continue
if not exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
continue
ret = renew(first_server)
if ret != 0:
status = 2
logger.error(
f"Certificates renewal for {first_server} failed",
)
else:
logger.info(
f"Certificates renewal for {first_server} successful",
)
elif getenv("AUTO_LETS_ENCRYPT") == "yes" and getenv("SERVER_NAME") != "":
first_server = getenv("SERVER_NAME").split(" ")[0]
if exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
ret = renew(first_server)
if ret != 0:
status = 2
logger.error(
f"Certificates renewal for {first_server} failed",
)
else:
logger.info(
f"Certificates renewal for {first_server} successful",
)
except:
status = 2
logger.error(f"Exception while running certbot-renew.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,49 +0,0 @@
local _M = {}
_M.__index = _M
local logger = require "logger"
local cjson = require "cjson"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:access()
if string.sub(ngx.var.uri, 1, string.len("/.well-known/acme-challenge/")) == "/.well-known/acme-challenge/" then
logger.log(ngx.NOTICE, "LETS-ENCRYPT", "Got a visit from Let's Encrypt, let's whitelist it.")
return true, "success", true, ngx.exit(ngx.OK)
end
return true, "success", false, nil
end
function _M:api()
if not string.match(ngx.var.uri, "^/lets%-encrypt/challenge$") or (ngx.var.request_method ~= "POST" and ngx.var.request_method ~= "DELETE") then
return false, nil, nil
end
local acme_folder = "/opt/bunkerweb/tmp/lets-encrypt/.well-known/acme-challenge/"
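-- the API expects a JSON body like {"token": "...", "validation": "..."} (validation is only used on POST)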
ngx.req.read_body()
local ret, data = pcall(cjson.decode, ngx.req.get_body_data())
if not ret then
return true, ngx.HTTP_BAD_REQUEST, {status = "error", msg = "json body decoding failed"}
end
os.execute("mkdir -p " .. acme_folder)
if ngx.var.request_method == "POST" then
local file, err = io.open(acme_folder .. data.token, "w+")
if not file then
return true, ngx.HTTP_INTERNAL_SERVER_ERROR, {status = "error", msg = "can't write validation token : " .. err}
end
file:write(data.validation)
file:close()
return true, ngx.HTTP_OK, {status = "success", msg = "validation token written"}
elseif ngx.var.request_method == "DELETE" then
local ok, err = os.remove(acme_folder .. data.token)
if not ok then
return true, ngx.HTTP_INTERNAL_SERVER_ERROR, {status = "error", msg = "can't remove validation token : " .. err}
end
return true, ngx.HTTP_OK, {status = "success", msg = "validation token removed"}
end
return true, ngx.HTTP_NOT_FOUND, {status = "error", msg = "unknown request"}
end
return _M

View File

@ -1,6 +0,0 @@
{% if SERVE_FILES == "yes" +%}
root {% if ROOT_FOLDER == "" %}/opt/bunkerweb/www/{% if MULTISITE == "yes" %}{{ SERVER_NAME.split(" ")[0] }}{% endif %}{% else %}{{ ROOT_FOLDER }}{% endif %};
try_files $uri $uri/ =404;
{% else +%}
root /nowhere;
{% endif %}

View File

@ -1,166 +0,0 @@
{
"id": "misc",
"order": 999,
"name": "Miscellaneous",
"description": "Miscellaneous settings.",
"version": "0.1",
"settings": {
"DISABLE_DEFAULT_SERVER": {
"context": "global",
"default": "no",
"help": "Close connection if the request vhost is unknown.",
"id": "disable-default-server",
"label": "Disable default server",
"regex": "^(yes|no)$",
"type": "check"
},
"REDIRECT_HTTP_TO_HTTPS": {
"context": "multisite",
"default": "no",
"help": "Redirect all HTTP request to HTTPS.",
"id": "redirect-http-to-https",
"label": "Redirect HTTP to HTTPS",
"regex": ".*",
"type": "text"
},
"AUTO_REDIRECT_HTTP_TO_HTTPS": {
"context": "multisite",
"default": "yes",
"help": "Try to detect if HTTPS is used and activate HTTP to HTTPS redirection if that's the case.",
"id": "auto-redirect-http-to-https",
"label": "Auto redirect HTTP to HTTPS",
"regex": ".*",
"type": "text"
},
"ALLOWED_METHODS": {
"context": "multisite",
"default": "GET|POST|HEAD",
"help": "Allowed HTTP methods to be sent by clients.",
"id": "allowed-methods",
"label": "Allowed methods",
"regex": ".*",
"type": "text"
},
"MAX_CLIENT_SIZE": {
"context": "multisite",
"default": "10m",
"help": "Maximum body size (0 for infinite).",
"id": "max-client-size",
"label": "Maximum body size",
"regex": ".*",
"type": "text"
},
"SERVE_FILES": {
"context": "multisite",
"default": "yes",
"help": "Serve files from the local folder.",
"id": "serve-files",
"label": "Serve files",
"regex": "^(yes|no)$",
"type": "check"
},
"ROOT_FOLDER": {
"context": "multisite",
"default": "",
"help": "Root folder containing files to serve (/opt/bunkerweb/www/{server_name} if unset).",
"id": "root-folder",
"label": "Root folder",
"regex": "^.*$",
"type": "text"
},
"HTTPS_PROTOCOLS": {
"context": "multisite",
"default": "TLSv1.2 TLSv1.3",
"help": "The supported version of TLS. We recommend the default value TLSv1.2 TLSv1.3 for compatibility reasons.",
"id": "https-protocols",
"label": "HTTPS protocols",
"regex": ".*",
"type": "text"
},
"HTTP2": {
"context": "multisite",
"default": "yes",
"help": "Support HTTP2 protocol when HTTPS is enabled.",
"id": "http2",
"label": "HTTP2",
"regex": ".*",
"type": "check"
},
"LISTEN_HTTP": {
"context": "multisite",
"default": "yes",
"help": "Respond to (insecure) HTTP requests.",
"id": "http-listen",
"label": "HTTP listen",
"regex": "^(yes|no)$",
"type": "check"
},
"USE_OPEN_FILE_CACHE": {
"context": "multisite",
"default": "no",
"help": "Enable open file cache feature",
"id": "use-open-file-cache",
"label": "Use open file cache",
"regex": "^(yes|no)$",
"type": "check"
},
"OPEN_FILE_CACHE": {
"context": "multisite",
"default": "max=1000 inactive=20s",
"help": "Open file cache directive",
"id": "open-file-cache",
"label": "Use open file cache",
"regex": "^.*$",
"type": "text"
},
"OPEN_FILE_CACHE_ERRORS": {
"context": "multisite",
"default": "yes",
"help": "Enable open file cache for errors",
"id": "open-file-cache-errors",
"label": "Open file cache errors",
"regex": "^(yes|no)$",
"type": "text"
},
"OPEN_FILE_CACHE_MIN_USES": {
"context": "multisite",
"default": "2",
"help": "Enable open file cache minimum uses",
"id": "open-file-cache-min-uses",
"label": "Open file cache min uses",
"regex": "^([1-9]+)$",
"type": "text"
},
"OPEN_FILE_CACHE_VALID": {
"context": "multisite",
"default": "30s",
"help": "Open file cache valid time",
"id": "open-file-cache-valid",
"label": "Open file cache valid time",
"regex": "^\\d+(ms|s|m|h|d|w|M|y)$",
"type": "text"
},
"EXTERNAL_PLUGIN_URLS": {
"context": "global",
"default": "",
"help": "List of external plugins URLs (direct download to .zip file) to download and install (URLs are separated with space).",
"id": "external-plugin-urls",
"label": "External plugin URLs",
"regex": "^.*$",
"type": "text"
},
"DENY_HTTP_STATUS": {
"context": "global",
"default": "403",
"help": "HTTP status code to send when the request is denied (403 or 444). When using 444, BunkerWeb will close the connection.",
"id": "deny-http-status",
"label": "Deny HTTP status",
"regex": "^(403|444)$",
"type": "select",
"select": [
"403",
"444"
]
}
}
}

View File

@ -1,121 +0,0 @@
# process rules with disruptive actions
SecRuleEngine {{ MODSECURITY_SEC_RULE_ENGINE }}
# allow body checks
SecRequestBodyAccess On
# enable XML parsing
SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \
"id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML"
# enable JSON parsing
SecRule REQUEST_HEADERS:Content-Type "application/json" \
"id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
# maximum data size
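# the human-readable MAX_CLIENT_SIZE is converted to bytes, e.g. "10m" becomes 10485760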
{% if MAX_CLIENT_SIZE.endswith("k") or MAX_CLIENT_SIZE.endswith("K") %}
SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 }}
{% elif MAX_CLIENT_SIZE.endswith("m") or MAX_CLIENT_SIZE.endswith("M") %}
SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 }}
{% elif MAX_CLIENT_SIZE.endswith("g") or MAX_CLIENT_SIZE.endswith("G") %}
SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 * 1024 }}
{% elif MAX_CLIENT_SIZE.isdigit() %}
SecRequestBodyLimit {{ MAX_CLIENT_SIZE }}
{% else %}
SecRequestBodyLimit 13107200
{% endif %}
SecRequestBodyNoFilesLimit 131072
# reject requests if bigger than max data size
SecRequestBodyLimitAction Reject
# reject if we can't process the body
SecRule REQBODY_ERROR "!@eq 0" \
"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"
# be strict with multipart/form-data body
SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
"id:'200003',phase:2,t:none,log,deny,status:400, \
msg:'Multipart request body failed strict validation: \
PE %{REQBODY_PROCESSOR_ERROR}, \
BQ %{MULTIPART_BOUNDARY_QUOTED}, \
BW %{MULTIPART_BOUNDARY_WHITESPACE}, \
DB %{MULTIPART_DATA_BEFORE}, \
DA %{MULTIPART_DATA_AFTER}, \
HF %{MULTIPART_HEADER_FOLDING}, \
LF %{MULTIPART_LF_LINE}, \
SM %{MULTIPART_MISSING_SEMICOLON}, \
IQ %{MULTIPART_INVALID_QUOTING}, \
IP %{MULTIPART_INVALID_PART}, \
IH %{MULTIPART_INVALID_HEADER_FOLDING}, \
FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'"
SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \
"id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'"
# enable response body checks
SecResponseBodyAccess On
SecResponseBodyMimeType text/plain text/html text/xml application/json
SecResponseBodyLimit 524288
SecResponseBodyLimitAction ProcessPartial
# log useful stuff
SecAuditEngine {{ MODSECURITY_SEC_AUDIT_ENGINE }}
SecAuditLogParts {{ MODSECURITY_SEC_AUDIT_LOG_PARTS }}
SecAuditLogType Serial
SecAuditLog /var/log/nginx/modsec_audit.log
# include OWASP CRS configurations
{% if USE_MODSECURITY_CRS == "yes" %}
include /opt/bunkerweb/core/modsecurity/files/crs-setup.conf
# custom CRS configurations before loading rules (e.g. exclusions)
{% if is_custom_conf("/opt/bunkerweb/configs/modsec-crs") %}
include /opt/bunkerweb/configs/modsec-crs/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/opt/bunkerweb/configs/modsec-crs/" + SERVER_NAME.split(" ")[0]) %}
include /opt/bunkerweb/configs/modsec-crs/{{ SERVER_NAME.split(" ")[0] }}/*.conf
{% endif %}
{% if is_custom_conf("/etc/nginx/modsec-crs") %}
include /etc/nginx/modsec-crs/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/etc/nginx/" + SERVER_NAME.split(" ")[0] + "/modsec-crs/") %}
include /etc/nginx/{{ SERVER_NAME.split(" ")[0] }}/modsec-crs/*.conf
{% endif %}
# unset REASON env var
SecAction "nolog,phase:1,setenv:REASON=none"
# Auto update allowed methods
{% if ALLOWED_METHODS != "" +%}
SecAction \
"id:900200,\
phase:1,\
nolog,\
pass,\
t:none,\
setvar:'tx.allowed_methods={{ ALLOWED_METHODS.replace("|", " ") }}'"
{% endif +%}
# include OWASP CRS rules
include /opt/bunkerweb/core/modsecurity/files/coreruleset/rules/*.conf
{% endif %}
# custom rules after loading the CRS
{% if is_custom_conf("/opt/bunkerweb/configs/modsec") %}
include /opt/bunkerweb/configs/modsec/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/opt/bunkerweb/configs/modsec/" + SERVER_NAME.split(" ")[0]) %}
include /opt/bunkerweb/configs/modsec/{{ SERVER_NAME.split(" ")[0] }}/*.conf
{% endif %}
{% if is_custom_conf("/etc/nginx/modsec") %}
include /etc/nginx/modsec/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/etc/nginx/" + SERVER_NAME.split(" ")[0] + "/modsec") %}
include /etc/nginx/{{ SERVER_NAME.split(" ")[0] }}/modsec/*.conf
{% endif %}
# set REASON env var
{% if USE_MODSECURITY_CRS == "yes" %}
SecRuleUpdateActionById 949110 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
SecRuleUpdateActionById 959100 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
{% endif %}


@ -1,16 +0,0 @@
{% if USE_REAL_IP == "yes" +%}
{% for element in read_lines("/opt/bunkerweb/cache/realip/combined.list") +%}
set_real_ip_from {{ element }};
{% endfor +%}
{% if REAL_IP_FROM != "" %}
{% for element in REAL_IP_FROM.split(" ") +%}
set_real_ip_from {{ element }};
{% endfor %}
{% endif %}
real_ip_header {{ REAL_IP_HEADER }};
{% if REAL_IP_RECURSIVE == "yes" +%}
real_ip_recursive on;
{% else +%}
real_ip_recursive off;
{% endif +%}
{% endif %}


@ -1,135 +0,0 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")

from requests import get

from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file


def check_line(line):
    if "/" in line:
        try:
            ip_network(line)
            return True, line
        except ValueError:
            pass
    else:
        try:
            ip_address(line)
            return True, line
        except ValueError:
            pass
    return False, ""
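

# A quick illustration of check_line's contract (hypothetical inputs) :
#   check_line("1.2.3.4")    -> (True, "1.2.3.4")     plain IP, kept as-is
#   check_line("10.0.0.0/8") -> (True, "10.0.0.0/8")  CIDR network, kept as-is
#   check_line("not-an-ip")  -> (False, "")           rejected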
logger = setup_logger("REALIP", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has Blacklist activated
blacklist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if getenv(first_server + "_USE_REAL_IP", getenv("USE_REAL_IP")) == "yes":
blacklist_activated = True
break
# Singlesite case
elif getenv("USE_REAL_IP") == "yes":
blacklist_activated = True
if not blacklist_activated:
logger.info("RealIP is not activated, skipping download...")
_exit(0)
# Create directory if it doesn't exist
makedirs("/opt/bunkerweb/cache/realip", exist_ok=True)
# Don't go further if the cache is fresh
if is_cached_file("/opt/bunkerweb/cache/realip/combined.list", "hour"):
logger.info("RealIP list is already in cache, skipping download...")
_exit(0)
# Get URLs
urls = []
for url in getenv("REALIP_FROM_URLS", "").split(" "):
if url != "" and url not in urls:
urls.append(url)
# Download and write data to temp file
i = 0
content = ""
for url in urls:
try:
logger.info(f"Downloading RealIP list from {url} ...")
resp = get(url, stream=True)
if resp.status_code != 200:
continue
for line in resp.iter_lines(decode_unicode=True):
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
ok, data = check_line(line)
if ok:
content += f"{data}\n"
i += 1
except:
status = 2
logger.error(
f"Exception while getting RealIP list from {url} :\n{format_exc()}"
)
with open("/opt/bunkerweb/tmp/realip-combined.list", "w") as f:
f.write(content)
# Check if file has changed
new_hash = file_hash("/opt/bunkerweb/tmp/realip-combined.list")
old_hash = cache_hash("/opt/bunkerweb/cache/realip/combined.list")
if new_hash == old_hash:
logger.info("New file is identical to cache file, reload is not needed")
_exit(0)
# Put file in cache
cached, err = cache_file(
"/opt/bunkerweb/tmp/realip-combined.list",
"/opt/bunkerweb/cache/realip/combined.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching list : {err}")
_exit(2)
# Update db
err = db.update_job_cache(
"realip-download",
None,
"combined.list",
content.encode("utf-8"),
checksum=new_hash,
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
logger.info(f"Downloaded {i} trusted IP/net")
status = 1
except:
status = 2
logger.error(f"Exception while running realip-download.py :\n{format_exc()}")
sys_exit(status)


@ -1,8 +0,0 @@
{% if has_variable(all, "USE_PROXY_CACHE", "yes") +%}
proxy_cache_path /opt/bunkerweb/tmp/proxy_cache levels={{ PROXY_CACHE_PATH_LEVELS }} keys_zone=proxycache:{{ PROXY_CACHE_PATH_ZONE_SIZE }} {{ PROXY_CACHE_PATH_PARAMS }};
{% endif %}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}


@ -1,19 +0,0 @@
{% if GENERATE_SELF_SIGNED_SSL == "yes" %}
# listen on HTTPS PORT
listen 0.0.0.0:{{ HTTPS_PORT }} ssl {% if HTTP2 == "yes" %}http2{% endif %} {% if USE_PROXY_PROTOCOL == "yes" %}proxy_protocol{% endif %};
# TLS config
ssl_certificate /opt/bunkerweb/cache/selfsigned/{{ SERVER_NAME.split(" ")[0] }}.pem;
ssl_certificate_key /opt/bunkerweb/cache/selfsigned/{{ SERVER_NAME.split(" ")[0] }}.key;
ssl_protocols {{ HTTPS_PROTOCOLS }};
ssl_prefer_server_ciphers on;
ssl_session_tickets off;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
{% if "TLSv1.2" in HTTPS_PROTOCOLS +%}
ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
{% endif %}


@ -1,116 +0,0 @@
#!/usr/bin/python3
from os import getenv, makedirs
from os.path import isfile
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from tarfile import open as taropen, TarInfo
from io import BytesIO
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")

from Database import Database
from logger import setup_logger

logger = setup_logger("self-signed", getenv("LOG_LEVEL", "INFO"))
db = Database(
    logger,
    sqlalchemy_string=getenv("DATABASE_URI", None),
)


def generate_cert(first_server, days, subj):
    if isfile("/opt/bunkerweb/cache/selfsigned/" + first_server + ".pem"):
        cmd = (
            "openssl x509 -checkend 86400 -noout -in /opt/bunkerweb/cache/selfsigned/"
            + first_server
            + ".pem"
        )
        proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
        if proc.returncode == 0:
            logger.info(f"Self-signed certificate already present for {first_server}")
            return True, 0
    logger.info(f"Generating self-signed certificate for {first_server}")
    cmd = f"openssl req -nodes -x509 -newkey rsa:4096 -keyout /opt/bunkerweb/cache/selfsigned/{first_server}.key -out /opt/bunkerweb/cache/selfsigned/{first_server}.pem -days {days} -subj {subj}"
    proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
    if proc.returncode != 0:
        logger.error(f"Self-signed certificate generation failed for {first_server}")
        return False, 2
    # Update db
    with open(f"/opt/bunkerweb/cache/selfsigned/{first_server}.key", "r") as f:
        key_data = f.read().encode("utf-8")
    err = db.update_job_cache(
        "self-signed", first_server, f"{first_server}.key", key_data
    )
    if err:
        logger.warning(f"Couldn't update db cache for {first_server}.key file: {err}")
    with open(f"/opt/bunkerweb/cache/selfsigned/{first_server}.pem", "r") as f:
        pem_data = f.read().encode("utf-8")
    err = db.update_job_cache(
        "self-signed", first_server, f"{first_server}.pem", pem_data
    )
    if err:
        logger.warning(f"Couldn't update db cache for {first_server}.pem file: {err}")
    logger.info(f"Successfully generated self-signed certificate for {first_server}")
    return True, 1
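

# Example invocation (hypothetical values), mirroring how the job calls it below :
#   generate_cert("www.example.com", "365", "/CN=www.example.com/")
# would roughly run :
#   openssl req -nodes -x509 -newkey rsa:4096 \
#     -keyout /opt/bunkerweb/cache/selfsigned/www.example.com.key \
#     -out /opt/bunkerweb/cache/selfsigned/www.example.com.pem \
#     -days 365 -subj /CN=www.example.com/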
status = 0

try:
    makedirs("/opt/bunkerweb/cache/selfsigned/", exist_ok=True)
    # Multisite case
    if getenv("MULTISITE") == "yes":
        for first_server in getenv("SERVER_NAME").split(" "):
            if (
                getenv(
                    first_server + "_GENERATE_SELF_SIGNED_SSL",
                    getenv("GENERATE_SELF_SIGNED_SSL"),
                )
                != "yes"
            ):
                continue
            if first_server == "":
                continue
            if isfile("/opt/bunkerweb/cache/selfsigned/" + first_server + ".pem"):
                continue
            ret, ret_status = generate_cert(
                first_server,
                getenv(first_server + "_SELF_SIGNED_SSL_EXPIRY"),
                getenv(first_server + "_SELF_SIGNED_SSL_SUBJ"),
            )
            if not ret:
                status = ret_status
            elif ret_status == 1:
                status = 1
    # Singlesite case
    elif getenv("GENERATE_SELF_SIGNED_SSL") == "yes" and getenv("SERVER_NAME") != "":
        first_server = getenv("SERVER_NAME").split(" ")[0]
        ret, ret_status = generate_cert(
            first_server,
            getenv("SELF_SIGNED_SSL_EXPIRY"),
            getenv("SELF_SIGNED_SSL_SUBJ"),
        )
        if not ret:
            status = ret_status
        elif ret_status == 1:
            status = 1
except:
    status = 2
    logger.error(f"Exception while running self-signed.py :\n{format_exc()}")

sys_exit(status)


@ -1,183 +0,0 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from re import match
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/db")

from requests import get

from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, is_cached_file, file_hash


def check_line(kind, line):
    if kind == "IP":
        if "/" in line:
            try:
                ip_network(line)
                return True, line
            except ValueError:
                pass
        else:
            try:
                ip_address(line)
                return True, line
            except ValueError:
                pass
        return False, ""
    elif kind == "RDNS":
        if match(r"^(\.?[A-Za-z0-9\-]+)*\.[A-Za-z]{2,}$", line):
            return True, line.lower()
        return False, ""
    elif kind == "ASN":
        real_line = line.replace("AS", "")
        if match(r"^\d+$", real_line):
            return True, real_line
    elif kind == "USER_AGENT":
        return True, line.replace("\\ ", " ").replace("\\.", "%.").replace(
            "\\\\", "\\"
        ).replace("-", "%-")
    elif kind == "URI":
        if match(r"^/", line):
            return True, line
    return False, ""
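

# A quick illustration of the per-kind normalization (hypothetical inputs) :
#   check_line("IP", "10.0.0.0/8")         -> (True, "10.0.0.0/8")
#   check_line("RDNS", "Host.Example.COM") -> (True, "host.example.com")  lowercased
#   check_line("ASN", "AS1234")            -> (True, "1234")              "AS" prefix stripped
#   check_line("USER_AGENT", "curl-bot")   -> (True, "curl%-bot")         escaped for Lua patterns
#   check_line("URI", "/admin")            -> (True, "/admin")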
logger = setup_logger("WHITELIST", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
status = 0
try:
# Check if at least a server has Whitelist activated
whitelist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(first_server + "_USE_WHITELIST", getenv("USE_WHITELIST"))
== "yes"
):
whitelist_activated = True
break
# Singlesite case
elif getenv("USE_WHITELIST") == "yes":
whitelist_activated = True
if not whitelist_activated:
logger.info("Whitelist is not activated, skipping downloads...")
_exit(0)
# Create directories if they don't exist
makedirs("/opt/bunkerweb/cache/whitelist", exist_ok=True)
makedirs("/opt/bunkerweb/tmp/whitelist", exist_ok=True)
# Our urls data
urls = {"IP": [], "RDNS": [], "ASN": [], "USER_AGENT": [], "URI": []}
# Don't go further if the cache is fresh
kinds_fresh = {
"IP": True,
"RDNS": True,
"ASN": True,
"USER_AGENT": True,
"URI": True,
}
all_fresh = True
for kind in kinds_fresh:
if not is_cached_file(f"/opt/bunkerweb/cache/whitelist/{kind}.list", "hour"):
kinds_fresh[kind] = False
all_fresh = False
logger.info(
f"Whitelist for {kind} is not cached, processing downloads...",
)
else:
logger.info(
f"Whitelist for {kind} is already in cache, skipping downloads...",
)
if all_fresh:
_exit(0)
# Get URLs
urls = {"IP": [], "RDNS": [], "ASN": [], "USER_AGENT": [], "URI": []}
for kind in urls:
for url in getenv(f"WHITELIST_{kind}_URLS", "").split(" "):
if url != "" and url not in urls[kind]:
urls[kind].append(url)
# Loop on kinds
for kind, urls_list in urls.items():
if kinds_fresh[kind]:
continue
# Write combined data of the kind to a single temp file
for url in urls_list:
try:
logger.info(f"Downloading whitelist data from {url} ...")
resp = get(url)
if resp.status_code != 200:
continue
i = 0
with open(f"/opt/bunkerweb/tmp/whitelist/{kind}.list", "w") as f:
for line in resp.content.decode("utf-8").splitlines():
line = line.strip()
if kind != "USER_AGENT":
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
ok, data = check_line(kind, line)
if ok:
f.write(data + "\n")
i += 1
logger.info(f"Downloaded {i} bad {kind}")
# Check if file has changed
new_hash = file_hash(f"/opt/bunkerweb/tmp/whitelist/{kind}.list")
old_hash = cache_hash(f"/opt/bunkerweb/cache/whitelist/{kind}.list")
if new_hash == old_hash:
logger.info(
f"New file {kind}.list is identical to cache file, reload is not needed",
)
else:
logger.info(
f"New file {kind}.list is different than cache file, reload is needed",
)
# Put file in cache
cached, err = cache_file(
f"/opt/bunkerweb/tmp/whitelist/{kind}.list",
f"/opt/bunkerweb/cache/whitelist/{kind}.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching whitelist : {err}")
status = 2
if status != 2:
# Update db
err = db.update_job_cache(
"whitelist-download",
None,
f"{kind}.list",
resp.content,
checksum=new_hash,
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
status = 1
except:
status = 2
logger.error(
f"Exception while getting whitelist from {url} :\n{format_exc()}",
)
except:
status = 2
logger.error(f"Exception while running whitelist-download.py :\n{format_exc()}")
sys_exit(status)


@ -1,253 +0,0 @@
local _M = {}
_M.__index = _M
local utils = require "utils"
local datastore = require "datastore"
local logger = require "logger"
local cjson = require "cjson"
local ipmatcher = require "resty.ipmatcher"
function _M.new()
local self = setmetatable({}, _M)
return self, nil
end
function _M:init()
-- Check if init is needed
local init_needed, err = utils.has_variable("USE_WHITELIST", "yes")
if init_needed == nil then
return false, err
end
if not init_needed then
return true, "no service uses Whitelist, skipping init"
end
-- Read whitelists
local whitelists = {
["IP"] = {},
["RDNS"] = {},
["ASN"] = {},
["USER_AGENT"] = {},
["URI"] = {}
}
local i = 0
for kind, _ in pairs(whitelists) do
local f, err = io.open("/opt/bunkerweb/cache/whitelist/" .. kind .. ".list", "r")
if f then
for line in f:lines() do
table.insert(whitelists[kind], line)
i = i + 1
end
f:close()
end
end
-- Load them into datastore
local ok, err = datastore:set("plugin_whitelist_list", cjson.encode(whitelists))
if not ok then
return false, "can't store Whitelist list into datastore : " .. err
end
return true, "successfully loaded " .. tostring(i) .. " whitelisted IP/network/rDNS/ASN/User-Agent/URI"
end
function _M:access()
-- Check if access is needed
local access_needed, err = utils.get_variable("USE_WHITELIST")
if access_needed == nil then
return false, err, nil, nil
end
if access_needed ~= "yes" then
return true, "Whitelist not activated", nil, nil
end
-- Check the cache
local cached_ip, err = self:is_in_cache("ip" .. ngx.var.remote_addr)
if cached_ip and cached_ip ~= "ok" then
ngx.var.is_whitelisted = "yes"
return true, "IP is in whitelist cache (info = " .. cached_ip .. ")", true, ngx.OK
end
local cached_uri, err = self:is_in_cache("uri" .. ngx.var.uri)
if cached_uri and cached_uri ~= "ok" then
ngx.var.is_whitelisted = "yes"
return true, "URI is in whitelist cache (info = " .. cached_uri .. ")", true, ngx.OK
end
local cached_ua = true
if ngx.var.http_user_agent then
cached_ua, err = self:is_in_cache("ua" .. ngx.var.http_user_agent)
if cached_ua and cached_ua ~= "ok" then
ngx.var.is_whitelisted = "yes"
return true, "User-Agent is in whitelist cache (info = " .. cached_ua .. ")", true, ngx.OK
end
end
if cached_ip and cached_uri and cached_ua then
return true, "full request is in whitelist cache (not whitelisted)", nil, nil
end
-- Get list
local data, err = datastore:get("plugin_whitelist_list")
if not data then
return false, "can't get Whitelist list : " .. err, false, nil
end
local ok, whitelists = pcall(cjson.decode, data)
if not ok then
return false, "error while decoding whitelists : " .. whitelists, false, nil
end
-- Return value
local ret, ret_err = true, "success"
-- Check if IP is in IP/net whitelist
local ip_net, err = utils.get_variable("WHITELIST_IP")
if ip_net and ip_net ~= "" then
for element in ip_net:gmatch("%S+") do
table.insert(whitelists["IP"], element)
end
end
if not cached_ip then
local ipm, err = ipmatcher.new(whitelists["IP"])
if not ipm then
ret = false
ret_err = "can't instantiate ipmatcher " .. err
else
if ipm:match(ngx.var.remote_addr) then
self:add_to_cache("ip" .. ngx.var.remote_addr, "ip/net")
ngx.var.is_whitelisted = "yes"
return ret, "client IP " .. ngx.var.remote_addr .. " is in whitelist", true, ngx.OK
end
end
end
-- Check if rDNS is in whitelist
local rdns_global, err = utils.get_variable("WHITELIST_RDNS_GLOBAL")
local check = true
if not rdns_global then
logger.log(ngx.ERR, "WHITELIST", "Error while getting WHITELIST_RDNS_GLOBAL variable : " .. err)
elseif rdns_global == "yes" then
check, err = utils.ip_is_global(ngx.var.remote_addr)
if check == nil then
logger.log(ngx.ERR, "WHITELIST", "Error while getting checking if IP is global : " .. err)
end
end
if not cached_ip and check then
local rdns, err = utils.get_rdns(ngx.var.remote_addr)
if not rdns then
ret = false
ret_err = "error while trying to get reverse dns : " .. err
else
local rdns_list, err = utils.get_variable("WHITELIST_RDNS")
if rdns_list and rdns_list ~= "" then
for element in rdns_list:gmatch("%S+") do
table.insert(whitelists["RDNS"], element)
end
end
for i, suffix in ipairs(whitelists["RDNS"]) do
if rdns:sub(-#suffix) == suffix then
self:add_to_cache("ip" .. ngx.var.remote_addr, "rDNS " .. suffix)
ngx.var.is_whitelisted = "yes"
return ret, "client IP " .. ngx.var.remote_addr .. " is in whitelist (info = rDNS " .. suffix .. ")", true, ngx.OK
end
end
end
end
-- Check if ASN is in whitelist
if not cached_ip then
if utils.ip_is_global(ngx.var.remote_addr) then
local asn, err = utils.get_asn(ngx.var.remote_addr)
if not asn then
ret = false
ret_err = "error while trying to get asn number : " .. err
else
local asn_list, err = utils.get_variable("WHITELIST_ASN")
if asn_list and asn_list ~= "" then
for element in asn_list:gmatch("%S+") do
table.insert(whitelists["ASN"], element)
end
end
for i, asn_bl in ipairs(whitelists["ASN"]) do
if tostring(asn) == asn_bl then
self:add_to_cache("ip" .. ngx.var.remote_addr, "ASN " .. tostring(asn))
ngx.var.is_whitelisted = "yes"
return ret, "client IP " .. ngx.var.remote_addr .. " is in whitelist (kind = ASN " .. tostring(asn) .. ")", true, ngx.OK
end
end
end
end
end
-- IP is not whitelisted
local ok, err = self:add_to_cache("ip" .. ngx.var.remote_addr, "ok")
if not ok then
ret = false
ret_err = err
end
-- Check if User-Agent is in whitelist
if not cached_ua and ngx.var.http_user_agent then
local ua_list, err = utils.get_variable("WHITELIST_USER_AGENT")
if ua_list and ua_list ~= "" then
for element in ua_list:gmatch("%S+") do
table.insert(whitelists["USER_AGENT"], element)
end
end
for i, ua_bl in ipairs(whitelists["USER_AGENT"]) do
if ngx.var.http_user_agent:match(ua_bl) then
self:add_to_cache("ua" .. ngx.var.http_user_agent, "UA " .. ua_bl)
ngx.var.is_whitelisted = "yes"
return ret, "client User-Agent " .. ngx.var.http_user_agent .. " is in whitelist (matched " .. ua_bl .. ")", true, ngx.OK
end
end
-- UA is not whitelisted
local ok, err = self:add_to_cache("ua" .. ngx.var.http_user_agent, "ok")
if not ok then
ret = false
ret_err = err
end
end
-- Check if URI is in whitelist
if not cached_uri then
local uri_list, err = utils.get_variable("WHITELIST_URI")
if uri_list and uri_list ~= "" then
for element in uri_list:gmatch("%S+") do
table.insert(whitelists["URI"], element)
end
end
for i, uri_bl in ipairs(whitelists["URI"]) do
if ngx.var.uri:match(uri_bl) then
self:add_to_cache("uri" .. ngx.var.uri, "URI " .. uri_bl)
ngx.var.is_whitelisted = "yes"
return ret, "client URI " .. ngx.var.uri .. " is in whitelist (matched " .. uri_bl .. ")", true, ngx.OK
end
end
-- URI is not whitelisted
local ok, err = self:add_to_cache("uri" .. ngx.var.uri, "ok")
if not ok then
ret = false
ret_err = err
end
end
return ret, "IP is not in list (error = " .. ret_err .. ")", false, nil
end
function _M:is_in_cache(ele)
local kind, err = datastore:get("plugin_whitelist_cache_" .. ngx.var.server_name .. ele)
if not kind then
if err ~= "not found" then
logger.log(ngx.ERR, "WHITELIST", "Error while accessing cache : " .. err)
end
return false, err
end
return kind, "success"
end
function _M:add_to_cache(ele, kind)
local ok, err = datastore:set("plugin_whitelist_cache_" .. ngx.var.server_name .. ele, kind, 3600)
if not ok then
logger.log(ngx.ERR, "WHITELIST", "Error while adding element to cache : " .. err)
return false, err
end
return true, "success"
end
return _M
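-- Minimal usage sketch (assumed caller; the actual require path depends on how
-- BunkerWeb wires plugins into its nginx Lua phases) :
--   local whitelist = require "whitelist"
--   local w = whitelist.new()
--   local ok, msg, handled, status = w:access()
--   if ok and handled then ngx.exit(status) end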


@ -1,293 +0,0 @@
#!/bin/bash
function git_update_checker() {
repo="$1"
commit="$2"
main_tmp_folder="/tmp/bunkerweb"
mkdir -p "${main_tmp_folder}"
echo " Check updates for ${repo}"
folder="$(echo "$repo" | sed -E "s@https://github.com/.*/(.*)\.git@\1@")"
output="$(git clone "$repo" "${main_tmp_folder}/${folder}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Error cloning $1"
echo "$output"
rm -rf "${main_tmp_folder}/${folder}" || true
return
fi
old_dir="$(pwd)"
cd "${main_tmp_folder}/${folder}"
output="$(git checkout "${commit}^{commit}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Commit hash $commit is absent from repository $repo"
echo "$output"
rm -rf "${main_tmp_folder}/${folder}" || true
cd "$old_dir"
return
fi
output="$(git fetch 2>&1)"
if [ $? -ne 0 ] ; then
echo "⚠️ Upgrade version checker error on $repo"
echo "$output"
rm -rf "${main_tmp_folder}/${folder}" || true
cd "$old_dir"
return
fi
latest_tag=$(git describe --tags `git rev-list --tags --max-count=1`)
if [ $? -ne 0 ] ; then
echo "⚠️ Upgrade version checker error on getting latest tag $repo"
echo "$latest_tag"
rm -rf "${main_tmp_folder}/${folder}" || true
cd "$old_dir"
return
fi
full_name_repo="$(echo "$repo" | sed -E "s@https://github.com/(.*)\.git@\1@")"
latest_release=$(curl --silent "https://api.github.com/repos/$full_name_repo/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
if [ $? -ne 0 ] ; then
echo "⚠️ Upgrade version checker error on getting latest release $repo"
echo "$latest_release"
rm -fr "${main_tmp_folder}/${folder}" || true
cd "$old_dir"
return
fi
current_tag=$(git describe --tags)
if [[ ! -z "$latest_tag" ]] && [[ "$current_tag" != *"$latest_tag"* ]]; then
echo "⚠️ Update checker: new tag found: $latest_tag, current tag/release: $current_tag, please update"
fi
if [[ ! -z "$latest_release" ]] && [[ "$current_tag" != *"$latest_release"* ]]; then
echo "⚠️ Update checker: new tag found: $latest_release, current tag/release: $current_tag, please update"
fi
rm -rf "${main_tmp_folder}/${folder}" || true
cd "$old_dir"
}
function git_secure_clone() {
repo="$1"
commit="$2"
folder="$(echo "$repo" | sed -E "s@https://github.com/.*/(.*)\.git@\1@")"
if [ ! -d "deps/src/${folder}" ] ; then
output="$(git clone "$repo" "deps/src/${folder}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Error cloning $1"
echo "$output"
exit 1
fi
old_dir="$(pwd)"
cd "deps/src/${folder}"
output="$(git checkout "${commit}^{commit}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Commit hash $commit is absent from repository $repo"
echo "$output"
exit 1
fi
cd "$old_dir"
output="$(rm -rf "deps/src/${folder}/.git")"
if [ $? -ne 0 ] ; then
echo "❌ Can't delete .git from repository $repo"
echo "$output"
exit 1
fi
else
echo "⚠️ Skipping clone of $repo because target directory is already present"
git_update_checker "$repo" "$commit"
fi
}
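# Usage sketch (hypothetical repo and commit hash) :
#   git_secure_clone "https://github.com/example/repo.git" "0123456789abcdef0123456789abcdef01234567"
# clones into deps/src/repo, pins the exact commit and strips the .git folder.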
function secure_download() {
link="$1"
file="$2"
hash="$3"
dir="$(echo $file | sed 's/.tar.gz//g')"
if [ ! -d "deps/src/${dir}" ] ; then
output="$(wget -q -O "deps/src/${file}" "$link" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Error downloading $link"
echo "$output"
exit 1
fi
check="$(sha512sum "deps/src/${file}" | cut -d ' ' -f 1)"
if [ "$check" != "$hash" ] ; then
echo "❌️ Wrong hash from file $link (expected $hash got $check)"
exit 1
fi
else
echo "⚠️ Skipping download of $link because target directory is already present"
fi
}
function do_and_check_cmd() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
output=$("$@" 2>&1)
ret="$?"
if [ $ret -ne 0 ] ; then
echo "❌ Error from command : $*"
echo "$output"
exit $ret
fi
#echo $output
return 0
}
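# Usage sketch : run a command (optionally in another directory) and abort on failure, e.g. :
#   CHANGE_DIR="/tmp" do_and_check_cmd tar -xzf archive.tar.gz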
# nginx 1.20.2
echo " Download nginx"
NGINX_VERSION="1.20.2"
secure_download "https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz" "nginx-${NGINX_VERSION}.tar.gz" "8b65e881ea4ac6162cbf32e5e95cf47a6d5418819f8763ca4a781cffa38187dd7886d4bc195d000a7046111a27121ff25800f8645405174995247e6738b4279a"
if [ -f "deps/src/nginx-${NGINX_VERSION}.tar.gz" ] ; then
do_and_check_cmd tar -xvzf deps/src/nginx-${NGINX_VERSION}.tar.gz -C deps/src
do_and_check_cmd rm -f deps/src/nginx-${NGINX_VERSION}.tar.gz
fi
# Lua 5.1.5
echo " Download Lua"
LUA_VERSION="5.1.5"
secure_download "https://www.lua.org/ftp/lua-${LUA_VERSION}.tar.gz" "lua-${LUA_VERSION}.tar.gz" "0142fefcbd13afcd9b201403592aa60620011cc8e8559d4d2db2f92739d18186860989f48caa45830ff4f99bfc7483287fd3ff3a16d4dec928e2767ce4d542a9"
if [ -f "deps/src/lua-${LUA_VERSION}.tar.gz" ] ; then
do_and_check_cmd tar -xvzf deps/src/lua-${LUA_VERSION}.tar.gz -C deps/src
do_and_check_cmd rm -f deps/src/lua-${LUA_VERSION}.tar.gz
do_and_check_cmd patch deps/src/lua-5.1.5/Makefile deps/misc/lua.patch1
do_and_check_cmd patch deps/src/lua-5.1.5/src/Makefile deps/misc/lua.patch2
fi
# LuaJIT 2.1-20220111
echo " Download LuaJIT"
git_secure_clone "https://github.com/openresty/luajit2.git" "f1491357fa1dbfa3480ba67513fee19a9c65ca6f"
# lua-nginx-module v0.10.20
echo " Download lua-nginx-module"
git_secure_clone "https://github.com/openresty/lua-nginx-module.git" "9007d673e28938f5dfa7720438991e22b794d225"
# lua-resty-core v0.1.22
echo " Download lua-resty-core"
git_secure_clone "https://github.com/openresty/lua-resty-core.git" "12f26310a35e45c37157420f7e1f395a0e36e457"
# lua-resty-lrucache v0.11
echo " Download lua-resty-lrucache"
git_secure_clone "https://github.com/openresty/lua-resty-lrucache.git" "f20bb8ac9489ba87d90d78f929552c2eab153caa"
# lua-resty-dns v0.22
echo " Download lua-resty-dns"
git_secure_clone "https://github.com/openresty/lua-resty-dns.git" "869d2fbb009b6ada93a5a10cb93acd1cc12bd53f"
# lua-resty-session v3.10
echo " Download lua-resty-session"
git_secure_clone "https://github.com/bungle/lua-resty-session.git" "e6bf2630c90df7b3db35e859f0aa7e096af3e918"
# lua-resty-random v?
echo " Download lua-resty-random"
git_secure_clone "https://github.com/bungle/lua-resty-random.git" "17b604f7f7dd217557ca548fc1a9a0d373386480"
# lua-resty-string v0.15
echo " Download lua-resty-string"
git_secure_clone "https://github.com/openresty/lua-resty-string.git" "b192878f6ed31b0af237935bbc5a8110a3c2256c"
# lua-cjson v2.1.0.8
echo " Download lua-cjson"
git_secure_clone "https://github.com/openresty/lua-cjson.git" "0df488874f52a881d14b5876babaa780bb6200ee"
# lua-gd v?
echo " Download lua-gd"
git_secure_clone "https://github.com/ittner/lua-gd.git" "2ce8e478a8591afd71e607506bc8c64b161bbd30"
# lua-resty-http v1.16.1
echo " Download lua-resty-http"
git_secure_clone "https://github.com/ledgetech/lua-resty-http.git" "9bf951dfe162dd9710a0e1f4525738d4902e9d20"
# lualogging v1.6.0
echo " Download lualogging"
git_secure_clone "https://github.com/lunarmodules/lualogging.git" "0bc4415de03ff1a99c92c02a5bed14a45b078079"
# luasocket v?
echo " Download luasocket"
git_secure_clone "https://github.com/diegonehab/luasocket.git" "5b18e475f38fcf28429b1cc4b17baee3b9793a62"
# luasec v1.0.2
echo " Download luasec"
git_secure_clone "https://github.com/brunoos/luasec.git" "ef14b27a2c8e541cac071165048250e85a7216df"
# lua-resty-ipmatcher v0.6.1 (pinned 1 commit after the release, just in case)
echo " Download lua-resty-ipmatcher"
dopatch="no"
if [ ! -d "deps/src/lua-resty-ipmatcher" ] ; then
dopatch="yes"
fi
git_secure_clone "https://github.com/api7/lua-resty-ipmatcher.git" "3948a92d2e168db14fa5ecd4bb10a7c0fe7ead70"
if [ "$dopatch" = "yes" ] ; then
do_and_check_cmd patch deps/src/lua-resty-ipmatcher/resty/ipmatcher.lua deps/misc/ipmatcher.patch
fi
# lua-resty-redis v0.29
echo " Download lua-resty-redis"
git_secure_clone "https://github.com/openresty/lua-resty-redis.git" "053f989c7f43d8edc79d5151e73b79249c6b5d94"
# lua-resty-upload v0.10
echo " Download lua-resty-upload"
git_secure_clone "https://github.com/openresty/lua-resty-upload.git" "cae01f590456561bc8d95da3d2d9f937bef57bec"
# luajit-geoip v2.1.0
echo " Download luajit-geoip"
dopatch="no"
if [ ! -d "deps/src/luajit-geoip" ] ; then
dopatch="yes"
fi
git_secure_clone "https://github.com/leafo/luajit-geoip.git" "12a9388207f40c37ad5cf6de2f8e0cc72bf13477"
if [ "$dopatch" = "yes" ] ; then
do_and_check_cmd patch deps/src/luajit-geoip/geoip/mmdb.lua deps/misc/mmdb.patch
fi
# lbase64 v1.5.3
echo " Download lbase64"
git_secure_clone "https://github.com/iskolbin/lbase64.git" "c261320edbdf82c16409d893a96c28c704aa0ab8"
# ModSecurity v3.0.4 (looks like v3.0.5 has a memleak on reload)
# TODO : test v3.0.6
echo " Download ModSecurity"
if [ ! -d "deps/src/ModSecurity" ] ; then
dopatch="yes"
fi
git_secure_clone "https://github.com/SpiderLabs/ModSecurity.git" "753145fbd1d6751a6b14fdd700921eb3cc3a1d35"
if [ "$dopatch" = "yes" ] ; then
do_and_check_cmd patch deps/src/ModSecurity/configure.ac deps/misc/modsecurity.patch
fi
# libinjection v?
echo " Download libinjection"
git_secure_clone "https://github.com/libinjection/libinjection.git" "49904c42a6e68dc8f16c022c693e897e4010a06c"
do_and_check_cmd cp -r deps/src/libinjection deps/src/ModSecurity/others
# ModSecurity-nginx v1.0.2
echo " Download ModSecurity-nginx"
dopatch="no"
if [ ! -d "deps/src/ModSecurity-nginx" ] ; then
dopatch="yes"
fi
git_secure_clone "https://github.com/SpiderLabs/ModSecurity-nginx.git" "2497e6ac654d0b117b9534aa735b757c6b11c84f"
if [ "$dopatch" = "yes" ] ; then
do_and_check_cmd patch deps/src/ModSecurity-nginx/src/ngx_http_modsecurity_log.c deps/misc/modsecurity-nginx.patch
fi
# libmaxminddb v1.6.0
echo " Download libmaxminddb"
git_secure_clone "https://github.com/maxmind/libmaxminddb.git" "2d0e6b7360b88f645e67ffc5a709b2327d361ac3"
# headers-more-nginx-module v?
echo " Download headers-more-nginx-module"
git_secure_clone "https://github.com/openresty/headers-more-nginx-module.git" "a4a0686605161a6777d7d612d5aef79b9e7c13e0"
# ngx_http_geoip2_module v3.3
#echo " Download ngx_http_geoip2_module"
#dosed="no"
#if [ ! -d "deps/src/ngx_http_geoip2_module" ] ; then
# dosed="yes"
#fi
#git_secure_clone "https://github.com/leev/ngx_http_geoip2_module.git" "5a83b6f958c67ea88d2899d0b3c2a5db8e36b211"
#if [ "$dosed" = "yes" ] ; then
# do_and_check_cmd sed -i '1s:^:ngx_feature_path=/opt/bunkerweb/deps/include\n:' deps/src/ngx_http_geoip2_module/config
# do_and_check_cmd sed -i 's:^ngx_feature_libs=.*$:ngx_feature_libs="-Wl,-rpath,/opt/bunkerweb/deps/lib -L/opt/bunkerweb/deps/lib -lmaxminddb":' deps/src/ngx_http_geoip2_module/config
#fi
# nginx_cookie_flag_module v1.1.0
echo " Download nginx_cookie_flag_module"
git_secure_clone "https://github.com/AirisX/nginx_cookie_flag_module.git" "4e48acf132952bbed43b28a8e6af0584dacb7b4c"
# ngx_brotli v?
echo " Download ngx_brotli"
git_secure_clone "https://github.com/google/ngx_brotli.git" "9aec15e2aa6feea2113119ba06460af70ab3ea62"


@ -1,138 +0,0 @@
#!/bin/bash
function do_and_check_cmd() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
output=$("$@" 2>&1)
ret="$?"
if [ $ret -ne 0 ] ; then
echo "❌ Error from command : $*"
echo "$output"
exit $ret
fi
#echo $output
return 0
}
NTASK=$(nproc)
# nginx version (must match the sources downloaded by clone.sh)
NGINX_VERSION="1.20.2"
# Compile and install lua
echo " Compile and install lua-5.1.5"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-5.1.5" do_and_check_cmd make -j $NTASK linux
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-5.1.5" do_and_check_cmd make INSTALL_TOP=/opt/bunkerweb/deps install
# Compile and install libmaxminddb
echo " Compile and install libmaxminddb"
# TODO : temp fix run it twice...
cd /tmp/bunkerweb/deps/src/libmaxminddb && ./bootstrap > /dev/null 2>&1
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd ./bootstrap
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd ./configure --prefix=/opt/bunkerweb/deps --disable-tests
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd make install
# Compile and install ModSecurity
echo " Compile and install ModSecurity"
# temp fix : Debian run it twice
# TODO : patch it in clone.sh
cd /tmp/bunkerweb/deps/src/ModSecurity && ./build.sh > /dev/null 2>&1
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd sh build.sh
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd ./configure --disable-dependency-tracking --disable-static --disable-examples --disable-doxygen-doc --disable-doxygen-html --disable-valgrind-memcheck --disable-valgrind-helgrind --prefix=/opt/bunkerweb/deps --with-maxmind=/opt/bunkerweb/deps
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd make install-strip
# Compile and install luajit2
echo " Compile and install luajit2"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luajit2" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luajit2" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-core
echo " Install openresty/lua-resty-core"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-core" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-lrucache
echo " Install lua-resty-lrucache"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-lrucache" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-dns
echo " Install lua-resty-dns"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-dns" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-session
echo " Install lua-resty-session"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/lua-resty-session/lib/resty/* /opt/bunkerweb/deps/lib/lua/resty
# Install lua-resty-random
echo " Install lua-resty-random"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-random" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-string
echo " Install lua-resty-string"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-string" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Compile and install lua-cjson
echo " Compile and install lua-cjson"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make LUA_INCLUDE_DIR=/opt/bunkerweb/deps/include -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_CMODULE_DIR=/opt/bunkerweb/deps/lib/lua LUA_MODULE_DIR=/opt/bunkerweb/deps/lib/lua install
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_CMODULE_DIR=/opt/bunkerweb/deps/lib/lua LUA_MODULE_DIR=/opt/bunkerweb/deps/lib/lua install-extra
# Compile and install lua-gd
echo " Compile and install lua-gd"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-gd" do_and_check_cmd make "CFLAGS=-O3 -Wall -fPIC -fomit-frame-pointer -I/opt/bunkerweb/deps/include -DVERSION=\\\"2.0.33r3\\\"" "LFLAGS=-shared -L/opt/bunkerweb/deps/lib -llua -lgd -Wl,-rpath=/opt/bunkerweb/deps/lib" LUABIN=/opt/bunkerweb/deps/bin/lua -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-gd" do_and_check_cmd make INSTALL_PATH=/opt/bunkerweb/deps/lib/lua install
# Download and install lua-resty-http
echo " Install lua-resty-http"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-http" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Download and install lualogging
echo " Install lualogging"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/lualogging/src/* /opt/bunkerweb/deps/lib/lua
# Compile and install luasocket
echo " Compile and install luasocket"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasocket" do_and_check_cmd make LUAINC_linux=/opt/bunkerweb/deps/include -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasocket" do_and_check_cmd make prefix=/opt/bunkerweb/deps CDIR_linux=lib/lua LDIR_linux=lib/lua install
# Compile and install luasec
echo " Compile and install luasec"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasec" do_and_check_cmd make INC_PATH=-I/opt/bunkerweb/deps/include linux -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasec" do_and_check_cmd make LUACPATH=/opt/bunkerweb/deps/lib/lua LUAPATH=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-ipmatcher
echo " Install lua-resty-ipmatcher"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-ipmatcher" do_and_check_cmd make INST_PREFIX=/opt/bunkerweb/deps INST_LIBDIR=/opt/bunkerweb/deps/lib/lua INST_LUADIR=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-redis
echo " Install lua-resty-redis"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-redis" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_LIB_DIR=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-upload
echo " Install lua-resty-upload"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-upload" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_LIB_DIR=/opt/bunkerweb/deps/lib/lua install
# Install luajit-geoip
echo " Install luajit-geoip"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/luajit-geoip/geoip /opt/bunkerweb/deps/lib/lua
# Install lbase64
echo " Install lbase64"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/lbase64/base64.lua /opt/bunkerweb/deps/lib/lua
# Compile dynamic modules
echo " Compile and install dynamic modules"
CONFARGS="$(nginx -V 2>&1 | sed -n -e 's/^.*arguments: //p')"
CONFARGS="${CONFARGS/-Os -fomit-frame-pointer -g/-Os}"
if [ "$OS" = "fedora" ] ; then
CONFARGS="$(echo -n "$CONFARGS" | sed "s/--with-ld-opt='.*'//" | sed "s/--with-cc-opt='.*'//")"
fi
echo '#!/bin/bash' > "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
echo "./configure $CONFARGS --add-dynamic-module=/tmp/bunkerweb/deps/src/ModSecurity-nginx --add-dynamic-module=/tmp/bunkerweb/deps/src/headers-more-nginx-module --add-dynamic-module=/tmp/bunkerweb/deps/src/nginx_cookie_flag_module --add-dynamic-module=/tmp/bunkerweb/deps/src/lua-nginx-module --add-dynamic-module=/tmp/bunkerweb/deps/src/ngx_brotli" >> "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
do_and_check_cmd chmod +x "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" LUAJIT_LIB="/opt/bunkerweb/deps/lib -Wl,-rpath,/opt/bunkerweb/deps/lib" LUAJIT_INC="/opt/bunkerweb/deps/include/luajit-2.1" MODSECURITY_LIB="/opt/bunkerweb/deps/lib" MODSECURITY_INC="/opt/bunkerweb/deps/include" do_and_check_cmd ./configure-fix.sh
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" do_and_check_cmd make -j $NTASK modules
do_and_check_cmd mkdir /opt/bunkerweb/modules
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" do_and_check_cmd cp ./objs/*.so /opt/bunkerweb/modules
# Dependencies are installed
echo " Dependencies for BunkerWeb successfully compiled and installed !"


@ -1,11 +0,0 @@
--- mmdb.lua 2022-04-04 09:32:41.456286600 +0200
+++ mmdb2.lua 2022-04-04 09:33:25.016286600 +0200
@@ -166,7 +166,7 @@
MMDB_entry_data_s *const entry_data,
...);
]])
-local lib = ffi.load("libmaxminddb")
+local lib = ffi.load("/opt/bunkerweb/deps/lib/libmaxminddb.so")
local consume_map, consume_array
local consume_value
consume_value = function(current)


@ -1,571 +0,0 @@
#
# This file is autogenerated by pip-compile with python 3.10
# To update, run:
#
# pip-compile --allow-unsafe --generate-hashes
#
acme==1.31.0 \
--hash=sha256:15134d4e404937d8464ee63a16eaf11adbe2f184960e99aa60410648a53f13ac \
--hash=sha256:f5e13262fa1101c38dd865378ac8b4639f819120eb66c5538fc6c09b7576fc53
# via certbot
cachetools==5.2.0 \
--hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
--hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
# via google-auth
certbot==1.31.0 \
--hash=sha256:29af531d33aaa87c8104864cd31ac2af541f0ec973a7252d7f7f5b15e10479db \
--hash=sha256:dc8c86d1b56dd3ca35139967f05ed6a9264fda8107d2783f24f42868e8ae54af
# via -r requirements.in
certifi==2022.9.24 \
--hash=sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14 \
--hash=sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382
# via
# kubernetes
# requests
cffi==1.15.1 \
--hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \
--hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \
--hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \
--hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \
--hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \
--hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \
--hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \
--hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \
--hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \
--hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \
--hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \
--hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \
--hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \
--hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \
--hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \
--hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \
--hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \
--hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \
--hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \
--hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \
--hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \
--hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \
--hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \
--hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \
--hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \
--hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \
--hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \
--hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \
--hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \
--hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \
--hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \
--hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \
--hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \
--hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \
--hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \
--hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \
--hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \
--hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \
--hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \
--hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \
--hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \
--hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \
--hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \
--hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \
--hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \
--hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \
--hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \
--hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \
--hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \
--hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \
--hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \
--hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \
--hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \
--hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \
--hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \
--hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \
--hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \
--hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \
--hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \
--hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \
--hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \
--hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \
--hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \
--hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0
# via cryptography
charset-normalizer==2.1.1 \
--hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \
--hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f
# via requests
configargparse==1.5.3 \
--hash=sha256:18f6535a2db9f6e02bd5626cc7455eac3e96b9ab3d969d366f9aafd5c5c00fe7 \
--hash=sha256:1b0b3cbf664ab59dada57123c81eff3d9737e0d11d8cf79e3d6eb10823f1739f
# via certbot
configobj==5.0.6 \
--hash=sha256:a2f5650770e1c87fb335af19a9b7eb73fc05ccf22144eb68db7d00cd2bcb0902
# via certbot
cryptography==38.0.1 \
--hash=sha256:0297ffc478bdd237f5ca3a7dc96fc0d315670bfa099c04dc3a4a2172008a405a \
--hash=sha256:10d1f29d6292fc95acb597bacefd5b9e812099d75a6469004fd38ba5471a977f \
--hash=sha256:16fa61e7481f4b77ef53991075de29fc5bacb582a1244046d2e8b4bb72ef66d0 \
--hash=sha256:194044c6b89a2f9f169df475cc167f6157eb9151cc69af8a2a163481d45cc407 \
--hash=sha256:1db3d807a14931fa317f96435695d9ec386be7b84b618cc61cfa5d08b0ae33d7 \
--hash=sha256:3261725c0ef84e7592597606f6583385fed2a5ec3909f43bc475ade9729a41d6 \
--hash=sha256:3b72c360427889b40f36dc214630e688c2fe03e16c162ef0aa41da7ab1455153 \
--hash=sha256:3e3a2599e640927089f932295a9a247fc40a5bdf69b0484532f530471a382750 \
--hash=sha256:3fc26e22840b77326a764ceb5f02ca2d342305fba08f002a8c1f139540cdfaad \
--hash=sha256:5067ee7f2bce36b11d0e334abcd1ccf8c541fc0bbdaf57cdd511fdee53e879b6 \
--hash=sha256:52e7bee800ec869b4031093875279f1ff2ed12c1e2f74923e8f49c916afd1d3b \
--hash=sha256:64760ba5331e3f1794d0bcaabc0d0c39e8c60bf67d09c93dc0e54189dfd7cfe5 \
--hash=sha256:765fa194a0f3372d83005ab83ab35d7c5526c4e22951e46059b8ac678b44fa5a \
--hash=sha256:79473cf8a5cbc471979bd9378c9f425384980fcf2ab6534b18ed7d0d9843987d \
--hash=sha256:896dd3a66959d3a5ddcfc140a53391f69ff1e8f25d93f0e2e7830c6de90ceb9d \
--hash=sha256:89ed49784ba88c221756ff4d4755dbc03b3c8d2c5103f6d6b4f83a0fb1e85294 \
--hash=sha256:ac7e48f7e7261207d750fa7e55eac2d45f720027d5703cd9007e9b37bbb59ac0 \
--hash=sha256:ad7353f6ddf285aeadfaf79e5a6829110106ff8189391704c1d8801aa0bae45a \
--hash=sha256:b0163a849b6f315bf52815e238bc2b2346604413fa7c1601eea84bcddb5fb9ac \
--hash=sha256:b6c9b706316d7b5a137c35e14f4103e2115b088c412140fdbd5f87c73284df61 \
--hash=sha256:c2e5856248a416767322c8668ef1845ad46ee62629266f84a8f007a317141013 \
--hash=sha256:ca9f6784ea96b55ff41708b92c3f6aeaebde4c560308e5fbbd3173fbc466e94e \
--hash=sha256:d1a5bd52d684e49a36582193e0b89ff267704cd4025abefb9e26803adeb3e5fb \
--hash=sha256:d3971e2749a723e9084dd507584e2a2761f78ad2c638aa31e80bc7a15c9db4f9 \
--hash=sha256:d4ef6cc305394ed669d4d9eebf10d3a101059bdcf2669c366ec1d14e4fb227bd \
--hash=sha256:d9e69ae01f99abe6ad646947bba8941e896cb3aa805be2597a0400e0764b5818
# via
# acme
# certbot
# josepy
# pyopenssl
distro==1.8.0 \
--hash=sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8 \
--hash=sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff
# via certbot
docker==6.0.0 \
--hash=sha256:19e330470af40167d293b0352578c1fa22d74b34d3edf5d4ff90ebc203bbb2f1 \
--hash=sha256:6e06ee8eca46cd88733df09b6b80c24a1a556bc5cb1e1ae54b2c239886d245cf
# via -r requirements.in
google-auth==2.13.0 \
--hash=sha256:9352dd6394093169157e6971526bab9a2799244d68a94a4a609f0dd751ef6f5e \
--hash=sha256:99510e664155f1a3c0396a076b5deb6367c52ea04d280152c85ac7f51f50eb42
# via kubernetes
greenlet==1.1.3.post0 \
--hash=sha256:0120a879aa2b1ac5118bce959ea2492ba18783f65ea15821680a256dfad04754 \
--hash=sha256:025b8de2273d2809f027d347aa2541651d2e15d593bbce0d5f502ca438c54136 \
--hash=sha256:05ae7383f968bba4211b1fbfc90158f8e3da86804878442b4fb6c16ccbcaa519 \
--hash=sha256:0914f02fcaa8f84f13b2df4a81645d9e82de21ed95633765dd5cc4d3af9d7403 \
--hash=sha256:0971d37ae0eaf42344e8610d340aa0ad3d06cd2eee381891a10fe771879791f9 \
--hash=sha256:0a954002064ee919b444b19c1185e8cce307a1f20600f47d6f4b6d336972c809 \
--hash=sha256:0aa1845944e62f358d63fcc911ad3b415f585612946b8edc824825929b40e59e \
--hash=sha256:104f29dd822be678ef6b16bf0035dcd43206a8a48668a6cae4d2fe9c7a7abdeb \
--hash=sha256:11fc7692d95cc7a6a8447bb160d98671ab291e0a8ea90572d582d57361360f05 \
--hash=sha256:17a69967561269b691747e7f436d75a4def47e5efcbc3c573180fc828e176d80 \
--hash=sha256:2794eef1b04b5ba8948c72cc606aab62ac4b0c538b14806d9c0d88afd0576d6b \
--hash=sha256:2c6e942ca9835c0b97814d14f78da453241837419e0d26f7403058e8db3e38f8 \
--hash=sha256:2ccdc818cc106cc238ff7eba0d71b9c77be868fdca31d6c3b1347a54c9b187b2 \
--hash=sha256:325f272eb997916b4a3fc1fea7313a8adb760934c2140ce13a2117e1b0a8095d \
--hash=sha256:39464518a2abe9c505a727af7c0b4efff2cf242aa168be5f0daa47649f4d7ca8 \
--hash=sha256:3a24f3213579dc8459e485e333330a921f579543a5214dbc935bc0763474ece3 \
--hash=sha256:3aeac044c324c1a4027dca0cde550bd83a0c0fbff7ef2c98df9e718a5086c194 \
--hash=sha256:3c22998bfef3fcc1b15694818fc9b1b87c6cc8398198b96b6d355a7bcb8c934e \
--hash=sha256:467b73ce5dcd89e381292fb4314aede9b12906c18fab903f995b86034d96d5c8 \
--hash=sha256:4a8b58232f5b72973350c2b917ea3df0bebd07c3c82a0a0e34775fc2c1f857e9 \
--hash=sha256:4f74aa0092602da2069df0bc6553919a15169d77bcdab52a21f8c5242898f519 \
--hash=sha256:5662492df0588a51d5690f6578f3bbbd803e7f8d99a99f3bf6128a401be9c269 \
--hash=sha256:5c2d21c2b768d8c86ad935e404cc78c30d53dea009609c3ef3a9d49970c864b5 \
--hash=sha256:5edf75e7fcfa9725064ae0d8407c849456553a181ebefedb7606bac19aa1478b \
--hash=sha256:60839ab4ea7de6139a3be35b77e22e0398c270020050458b3d25db4c7c394df5 \
--hash=sha256:62723e7eb85fa52e536e516ee2ac91433c7bb60d51099293671815ff49ed1c21 \
--hash=sha256:64e10f303ea354500c927da5b59c3802196a07468332d292aef9ddaca08d03dd \
--hash=sha256:66aa4e9a726b70bcbfcc446b7ba89c8cec40f405e51422c39f42dfa206a96a05 \
--hash=sha256:695d0d8b5ae42c800f1763c9fce9d7b94ae3b878919379150ee5ba458a460d57 \
--hash=sha256:70048d7b2c07c5eadf8393e6398595591df5f59a2f26abc2f81abca09610492f \
--hash=sha256:7afa706510ab079fd6d039cc6e369d4535a48e202d042c32e2097f030a16450f \
--hash=sha256:7cf37343e43404699d58808e51f347f57efd3010cc7cee134cdb9141bd1ad9ea \
--hash=sha256:8149a6865b14c33be7ae760bcdb73548bb01e8e47ae15e013bf7ef9290ca309a \
--hash=sha256:814f26b864ed2230d3a7efe0336f5766ad012f94aad6ba43a7c54ca88dd77cba \
--hash=sha256:82a38d7d2077128a017094aff334e67e26194f46bd709f9dcdacbf3835d47ef5 \
--hash=sha256:83a7a6560df073ec9de2b7cb685b199dfd12519bc0020c62db9d1bb522f989fa \
--hash=sha256:8415239c68b2ec9de10a5adf1130ee9cb0ebd3e19573c55ba160ff0ca809e012 \
--hash=sha256:88720794390002b0c8fa29e9602b395093a9a766b229a847e8d88349e418b28a \
--hash=sha256:890f633dc8cb307761ec566bc0b4e350a93ddd77dc172839be122be12bae3e10 \
--hash=sha256:8926a78192b8b73c936f3e87929931455a6a6c6c385448a07b9f7d1072c19ff3 \
--hash=sha256:8c0581077cf2734569f3e500fab09c0ff6a2ab99b1afcacbad09b3c2843ae743 \
--hash=sha256:8fda1139d87ce5f7bd80e80e54f9f2c6fe2f47983f1a6f128c47bf310197deb6 \
--hash=sha256:91a84faf718e6f8b888ca63d0b2d6d185c8e2a198d2a7322d75c303e7097c8b7 \
--hash=sha256:924df1e7e5db27d19b1359dc7d052a917529c95ba5b8b62f4af611176da7c8ad \
--hash=sha256:949c9061b8c6d3e6e439466a9be1e787208dec6246f4ec5fffe9677b4c19fcc3 \
--hash=sha256:9649891ab4153f217f319914455ccf0b86986b55fc0573ce803eb998ad7d6854 \
--hash=sha256:96656c5f7c95fc02c36d4f6ef32f4e94bb0b6b36e6a002c21c39785a4eec5f5d \
--hash=sha256:a812df7282a8fc717eafd487fccc5ba40ea83bb5b13eb3c90c446d88dbdfd2be \
--hash=sha256:a8d24eb5cb67996fb84633fdc96dbc04f2d8b12bfcb20ab3222d6be271616b67 \
--hash=sha256:bef49c07fcb411c942da6ee7d7ea37430f830c482bf6e4b72d92fd506dd3a427 \
--hash=sha256:bffba15cff4802ff493d6edcf20d7f94ab1c2aee7cfc1e1c7627c05f1102eee8 \
--hash=sha256:c0643250dd0756f4960633f5359884f609a234d4066686754e834073d84e9b51 \
--hash=sha256:c6f90234e4438062d6d09f7d667f79edcc7c5e354ba3a145ff98176f974b8132 \
--hash=sha256:c8c9301e3274276d3d20ab6335aa7c5d9e5da2009cccb01127bddb5c951f8870 \
--hash=sha256:c8ece5d1a99a2adcb38f69af2f07d96fb615415d32820108cd340361f590d128 \
--hash=sha256:cb863057bed786f6622982fb8b2c122c68e6e9eddccaa9fa98fd937e45ee6c4f \
--hash=sha256:ccbe7129a282ec5797df0451ca1802f11578be018a32979131065565da89b392 \
--hash=sha256:d25cdedd72aa2271b984af54294e9527306966ec18963fd032cc851a725ddc1b \
--hash=sha256:d75afcbb214d429dacdf75e03a1d6d6c5bd1fa9c35e360df8ea5b6270fb2211c \
--hash=sha256:d7815e1519a8361c5ea2a7a5864945906f8e386fa1bc26797b4d443ab11a4589 \
--hash=sha256:eb6ac495dccb1520667cfea50d89e26f9ffb49fa28496dea2b95720d8b45eb54 \
--hash=sha256:ec615d2912b9ad807afd3be80bf32711c0ff9c2b00aa004a45fd5d5dde7853d9 \
--hash=sha256:f5e09dc5c6e1796969fd4b775ea1417d70e49a5df29aaa8e5d10675d9e11872c \
--hash=sha256:f6661b58412879a2aa099abb26d3c93e91dedaba55a6394d1fb1512a77e85de9 \
--hash=sha256:f7d20c3267385236b4ce54575cc8e9f43e7673fc761b069c820097092e318e3b \
--hash=sha256:fe7c51f8a2ab616cb34bc33d810c887e89117771028e1e3d3b77ca25ddeace04
# via sqlalchemy
idna==3.4 \
--hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
--hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
# via requests
jinja2==3.1.2 \
--hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
--hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
# via -r requirements.in
josepy==1.13.0 \
--hash=sha256:6f64eb35186aaa1776b7a1768651b1c616cab7f9685f9660bffc6491074a5390 \
--hash=sha256:8931daf38f8a4c85274a0e8b7cb25addfd8d1f28f9fb8fbed053dd51aec75dc9
# via
# acme
# certbot
kubernetes==25.3.0 \
--hash=sha256:213befbb4e5aed95f94950c7eed0c2322fc5a2f8f40932e58d28fdd42d90836c \
--hash=sha256:eb42333dad0bb5caf4e66460c6a4a1a36f0f057a040f35018f6c05a699baed86
# via -r requirements.in
markupsafe==2.1.1 \
--hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \
--hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \
--hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \
--hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \
--hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \
--hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \
--hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \
--hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \
--hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \
--hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \
--hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \
--hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \
--hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \
--hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \
--hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \
--hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \
--hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \
--hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \
--hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \
--hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \
--hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \
--hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \
--hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \
--hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \
--hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \
--hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \
--hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \
--hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \
--hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \
--hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \
--hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \
--hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \
--hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \
--hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \
--hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \
--hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \
--hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \
--hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \
--hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \
--hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7
# via jinja2
maxminddb==2.2.0 \
--hash=sha256:e37707ec4fab115804670e0fb7aedb4b57075a8b6f80052bdc648d3c005184e5
# via -r requirements.in
oauthlib==3.2.2 \
--hash=sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca \
--hash=sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918
# via requests-oauthlib
packaging==21.3 \
--hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \
--hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522
# via docker
parsedatetime==2.6 \
--hash=sha256:4cb368fbb18a0b7231f4d76119165451c8d2e35951455dfee97c62a87b04d455 \
--hash=sha256:cb96edd7016872f58479e35879294258c71437195760746faffedb692aef000b
# via certbot
pyasn1==0.4.8 \
--hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \
--hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.2.8 \
--hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \
--hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74
# via google-auth
pycparser==2.21 \
--hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
--hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
# via cffi
pymysql==1.0.2 \
--hash=sha256:41fc3a0c5013d5f039639442321185532e3e2c8924687abe6537de157d403641 \
--hash=sha256:816927a350f38d56072aeca5dfb10221fe1dc653745853d30a216637f5d7ad36
# via -r requirements.in
pyopenssl==22.1.0 \
--hash=sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968 \
--hash=sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e
# via
# acme
# josepy
pyparsing==3.0.9 \
--hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \
--hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc
# via packaging
pyrfc3339==1.1 \
--hash=sha256:67196cb83b470709c580bb4738b83165e67c6cc60e1f2e4f286cfcb402a926f4 \
--hash=sha256:81b8cbe1519cdb79bed04910dd6fa4e181faf8c88dff1e1b987b5f7ab23a5b1a
# via
# acme
# certbot
python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
--hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
# via kubernetes
python-dotenv==0.21.0 \
--hash=sha256:1684eb44636dd462b66c3ee016599815514527ad99965de77f43e0944634a7e5 \
--hash=sha256:b77d08274639e3d34145dfa6c7008e66df0f04b7be7a75fd0d5292c191d79045
# via -r requirements.in
pytz==2022.5 \
--hash=sha256:335ab46900b1465e714b4fda4963d87363264eb662aab5e65da039c25f1f5b22 \
--hash=sha256:c4d88f472f54d615e9cd582a5004d1e5f624854a6a27a6211591c251f22a6914
# via
# acme
# certbot
# pyrfc3339
pyyaml==6.0 \
--hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
--hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
--hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \
--hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \
--hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \
--hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \
--hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \
--hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \
--hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \
--hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \
--hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \
--hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \
--hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \
--hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \
--hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \
--hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \
--hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \
--hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \
--hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \
--hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \
--hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \
--hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \
--hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \
--hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \
--hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \
--hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \
--hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \
--hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \
--hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \
--hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \
--hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \
--hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \
--hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \
--hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \
--hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \
--hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \
--hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \
--hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \
--hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
--hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
# via kubernetes
requests==2.28.1 \
--hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \
--hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349
# via
# -r requirements.in
# acme
# docker
# kubernetes
# requests-oauthlib
# requests-toolbelt
requests-oauthlib==1.3.1 \
--hash=sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5 \
--hash=sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a
# via kubernetes
requests-toolbelt==0.10.1 \
--hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \
--hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d
# via acme
rsa==4.9 \
--hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
--hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
# via google-auth
schedule==1.1.0 \
--hash=sha256:617adce8b4bf38c360b781297d59918fbebfb2878f1671d189f4f4af5d0567a4 \
--hash=sha256:e6ca13585e62c810e13a08682e0a6a8ad245372e376ba2b8679294f377dfc8e4
# via -r requirements.in
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
--hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
# via
# configobj
# google-auth
# kubernetes
# python-dateutil
sqlalchemy==1.4.42 \
--hash=sha256:04f2598c70ea4a29b12d429a80fad3a5202d56dce19dd4916cc46a965a5ca2e9 \
--hash=sha256:0501f74dd2745ec38f44c3a3900fb38b9db1ce21586b691482a19134062bf049 \
--hash=sha256:0ee377eb5c878f7cefd633ab23c09e99d97c449dd999df639600f49b74725b80 \
--hash=sha256:11b2ec26c5d2eefbc3e6dca4ec3d3d95028be62320b96d687b6e740424f83b7d \
--hash=sha256:15d878929c30e41fb3d757a5853b680a561974a0168cd33a750be4ab93181628 \
--hash=sha256:177e41914c476ed1e1b77fd05966ea88c094053e17a85303c4ce007f88eff363 \
--hash=sha256:1811a0b19a08af7750c0b69e38dec3d46e47c4ec1d74b6184d69f12e1c99a5e0 \
--hash=sha256:1d0c23ecf7b3bc81e29459c34a3f4c68ca538de01254e24718a7926810dc39a6 \
--hash=sha256:22459fc1718785d8a86171bbe7f01b5c9d7297301ac150f508d06e62a2b4e8d2 \
--hash=sha256:28e881266a172a4d3c5929182fde6bb6fba22ac93f137d5380cc78a11a9dd124 \
--hash=sha256:2e56dfed0cc3e57b2f5c35719d64f4682ef26836b81067ee6cfad062290fd9e2 \
--hash=sha256:2fd49af453e590884d9cdad3586415922a8e9bb669d874ee1dc55d2bc425aacd \
--hash=sha256:3ab7c158f98de6cb4f1faab2d12973b330c2878d0c6b689a8ca424c02d66e1b3 \
--hash=sha256:4948b6c5f4e56693bbeff52f574279e4ff972ea3353f45967a14c30fb7ae2beb \
--hash=sha256:4e1c5f8182b4f89628d782a183d44db51b5af84abd6ce17ebb9804355c88a7b5 \
--hash=sha256:5ce6929417d5dce5ad1d3f147db81735a4a0573b8fb36e3f95500a06eaddd93e \
--hash=sha256:5ede1495174e69e273fad68ad45b6d25c135c1ce67723e40f6cf536cb515e20b \
--hash=sha256:5f966b64c852592469a7eb759615bbd351571340b8b344f1d3fa2478b5a4c934 \
--hash=sha256:6045b3089195bc008aee5c273ec3ba9a93f6a55bc1b288841bd4cfac729b6516 \
--hash=sha256:6c9d004eb78c71dd4d3ce625b80c96a827d2e67af9c0d32b1c1e75992a7916cc \
--hash=sha256:6e39e97102f8e26c6c8550cb368c724028c575ec8bc71afbbf8faaffe2b2092a \
--hash=sha256:723e3b9374c1ce1b53564c863d1a6b2f1dc4e97b1c178d9b643b191d8b1be738 \
--hash=sha256:876eb185911c8b95342b50a8c4435e1c625944b698a5b4a978ad2ffe74502908 \
--hash=sha256:9256563506e040daddccaa948d055e006e971771768df3bb01feeb4386c242b0 \
--hash=sha256:934472bb7d8666727746a75670a1f8d91a9cae8c464bba79da30a0f6faccd9e1 \
--hash=sha256:97ff50cd85bb907c2a14afb50157d0d5486a4b4639976b4a3346f34b6d1b5272 \
--hash=sha256:9b01d9cd2f9096f688c71a3d0f33f3cd0af8549014e66a7a7dee6fc214a7277d \
--hash=sha256:9e3a65ce9ed250b2f096f7b559fe3ee92e6605fab3099b661f0397a9ac7c8d95 \
--hash=sha256:a7dd5b7b34a8ba8d181402d824b87c5cee8963cb2e23aa03dbfe8b1f1e417cde \
--hash=sha256:a85723c00a636eed863adb11f1e8aaa36ad1c10089537823b4540948a8429798 \
--hash=sha256:b42c59ffd2d625b28cdb2ae4cde8488543d428cba17ff672a543062f7caee525 \
--hash=sha256:bd448b262544b47a2766c34c0364de830f7fb0772d9959c1c42ad61d91ab6565 \
--hash=sha256:ca9389a00f639383c93ed00333ed763812f80b5ae9e772ea32f627043f8c9c88 \
--hash=sha256:df76e9c60879fdc785a34a82bf1e8691716ffac32e7790d31a98d7dec6e81545 \
--hash=sha256:e12c6949bae10f1012ab5c0ea52ab8db99adcb8c7b717938252137cdf694c775 \
--hash=sha256:e4ef8cb3c5b326f839bfeb6af5f406ba02ad69a78c7aac0fbeeba994ad9bb48a \
--hash=sha256:e7e740453f0149437c101ea4fdc7eea2689938c5760d7dcc436c863a12f1f565 \
--hash=sha256:effc89e606165ca55f04f3f24b86d3e1c605e534bf1a96e4e077ce1b027d0b71 \
--hash=sha256:f0f574465b78f29f533976c06b913e54ab4980b9931b69aa9d306afff13a9471 \
--hash=sha256:fa5b7eb2051e857bf83bade0641628efe5a88de189390725d3e6033a1fff4257 \
--hash=sha256:fdb94a3d1ba77ff2ef11912192c066f01e68416f554c194d769391638c8ad09a
# via -r requirements.in
urllib3==1.26.12 \
--hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \
--hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997
# via
# docker
# kubernetes
# requests
websocket-client==1.4.1 \
--hash=sha256:398909eb7e261f44b8f4bd474785b6ec5f5b499d4953342fe9755e01ef624090 \
--hash=sha256:f9611eb65c8241a67fb373bef040b3cf8ad377a9f6546a12b620b6511e8ea9ef
# via
# docker
# kubernetes
zope-component==5.0.1 \
--hash=sha256:32cbe426ba8fa7b62ce5b211f80f0718a0c749cc7ff09e3f4b43a57f7ccdf5e5 \
--hash=sha256:e955eb9f1e55d30e2d8097c8baa9ee012c356887eef3b0d43e6bfcd4868221e5
# via certbot
zope-event==4.5.0 \
--hash=sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42 \
--hash=sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330
# via zope-component
zope-hookable==5.2 \
--hash=sha256:00615d623295aaaad569a2e77b9659359a0c8cffd8077aaa9c114b55c3c78440 \
--hash=sha256:120c9a4f10db108f93edc7195c2efd15bccf3dda5b7ca45d4af16bf7c04381ad \
--hash=sha256:16d7ac04fe3bb7e8cfaf5303694ce861af9cecd84e559a1a82d2c7037fc489bc \
--hash=sha256:2151318d6c9167bdbf9def70c56807488ae95cc2741ec2461d3b30e6ecdf6865 \
--hash=sha256:22d47fcb3769bcdf071289a4e0ae576a00b65ff890c51995d7b2f8b68a9fa86c \
--hash=sha256:34e1ff9d76c4d65f87ab859cb68f6b3ed2e9b1337c40568347de910fcf963d8c \
--hash=sha256:3a0f478514051788ff6e070086be44a3caee5962661f422c74562353770995d6 \
--hash=sha256:3b3c9d5f419704f4d8e1327d4061c12a147a43025846bce0d4a6fc01447aeba9 \
--hash=sha256:3c6043bc00e3a49e32a3957207ce07d381f6d343b4b6ea243ccb4314b56a55d4 \
--hash=sha256:48287c32e274672a6d3997415e6b5bea43808029b07255647e93746f3c5c33b7 \
--hash=sha256:4c3018bcf2b39cf5cccf40826f799b4b8c140056db780f96cb0ca332a243bd29 \
--hash=sha256:5496d75a533cdac2dacbce4a3e694dda4b4cbc45d225c1ad914526c8961aa14b \
--hash=sha256:5e5a8412b91352b0adf2120492ffd5380452632ccd1cd625c937898d5d171d5f \
--hash=sha256:66d80ecc62fb9f1ad21e9efa1571d18d486ab772ffd674abaa39076ad43837e4 \
--hash=sha256:68bbd32e010b42db5d9878b98b84222f474c0641a42bd8b30bc3deffd05111c3 \
--hash=sha256:71f00eebcd23aa26231f9130144fb146459bb4b665498c84cd0c332d45491cf5 \
--hash=sha256:73d3161798337f7416444d3265d6939e2abd3d0f4bf30dd7116bab04707c1cff \
--hash=sha256:74d425c708d690f0504711c5ecbcd2573cea4c355c42d2081a85c3bb76db7d6a \
--hash=sha256:7aa7776c642cad6ea655a8c11ca8b0daa0e5513b5b91dd55b2a3512db259117f \
--hash=sha256:7dab7c76d0bb151eb21198c7edef834d3bf0812bc40e0183deea527531ae512b \
--hash=sha256:802c5222a6f09d01fa5d999147c3e4ef74d4f8c33fb376a85990c211b9c52e49 \
--hash=sha256:83c42469b5adb0240d61619076d0eb9a65da5e6acb8ff27dd56c7073321582a7 \
--hash=sha256:883338bfd1b14e56b13c406fb12ce793ade19460aeec7b02cd895351a08468e7 \
--hash=sha256:8abc311ebed511bbe46d95c339e7ec853275e377635b16ea10b6148890cf9b7e \
--hash=sha256:9f31beced1c28288286197e9f95173314ab4dd88bdc7a1d6aa7f3e45cf743942 \
--hash=sha256:a50320c718610a48cd6f1ebd1c2bd65aeb8501b0a929bbdc88a898a19a744d4c \
--hash=sha256:ae6f86f1f0dcd999eaf008b4524314624e60aa793c31454df6f86b092494ab60 \
--hash=sha256:af4ed40fd81ef292f0c196f40b59e65b3d19e48e40ccb1490e73a10125b31577 \
--hash=sha256:b0c1c63664ee7389db4e437e24fa6025f334593b2f63ed47e4ae6b1e82257366 \
--hash=sha256:b3e7a4661ddabb933d80c560099f4fca6db4b58a54a97dcaccef90bf6b7bc431 \
--hash=sha256:d3c0632b51dc0f0743054e4ae22672439774b756b632244e745f05bbc4345771 \
--hash=sha256:df68bd26bef4a338c062a8f6a0eca8d27582b212aa74ae58714d410f348cb314 \
--hash=sha256:e3a48050d820722907d9fad9f6e9e30f928e8a6d724aa173abd521dec8964e05 \
--hash=sha256:e45964866a1c59c2660573730ae2409c6168873c412b57cec44b221b374b7c0f \
--hash=sha256:eb08b8f23a75af9a9003d8990247fd2fdac7e486dafae32d2a147e805545235b \
--hash=sha256:fabd9152bfb335892a4f980a00e861f31cec09bceffae1e9686189ea1e55141a \
--hash=sha256:fdd147669c38ea64a3b745a01a217ac21a76b69777ca165fd2953d1c675b1779
# via zope-component
zope-interface==5.5.0 \
--hash=sha256:006f8dd81fae28027fc28ada214855166712bf4f0bfbc5a8788f9b70982b9437 \
--hash=sha256:03f5ae315db0d0de668125d983e2a819a554f3fdb2d53b7e934e3eb3c3c7375d \
--hash=sha256:0eb2b3e84f48dd9cfc8621c80fba905d7e228615c67f76c7df7c716065669bb6 \
--hash=sha256:1e3495bb0cdcea212154e558082c256f11b18031f05193ae2fb85d048848db14 \
--hash=sha256:26c1456520fdcafecc5765bec4783eeafd2e893eabc636908f50ee31fe5c738c \
--hash=sha256:2cb3003941f5f4fa577479ac6d5db2b940acb600096dd9ea9bf07007f5cab46f \
--hash=sha256:37ec9ade9902f412cc7e7a32d71f79dec3035bad9bd0170226252eed88763c48 \
--hash=sha256:3eedf3d04179774d750e8bb4463e6da350956a50ed44d7b86098e452d7ec385e \
--hash=sha256:3f68404edb1a4fb6aa8a94675521ca26c83ebbdbb90e894f749ae0dc4ca98418 \
--hash=sha256:423c074e404f13e6fa07f4454f47fdbb38d358be22945bc812b94289d9142374 \
--hash=sha256:43490ad65d4c64e45a30e51a2beb7a6b63e1ff395302ad22392224eb618476d6 \
--hash=sha256:47ff078734a1030c48103422a99e71a7662d20258c00306546441adf689416f7 \
--hash=sha256:58a66c2020a347973168a4a9d64317bac52f9fdfd3e6b80b252be30da881a64e \
--hash=sha256:58a975f89e4584d0223ab813c5ba4787064c68feef4b30d600f5e01de90ae9ce \
--hash=sha256:5c6023ae7defd052cf76986ce77922177b0c2f3913bea31b5b28fbdf6cb7099e \
--hash=sha256:6566b3d2657e7609cd8751bcb1eab1202b1692a7af223035a5887d64bb3a2f3b \
--hash=sha256:687cab7f9ae18d2c146f315d0ca81e5ffe89a139b88277afa70d52f632515854 \
--hash=sha256:700ebf9662cf8df70e2f0cb4988e078c53f65ee3eefd5c9d80cf988c4175c8e3 \
--hash=sha256:740f3c1b44380658777669bcc42f650f5348e53797f2cee0d93dc9b0f9d7cc69 \
--hash=sha256:7bdcec93f152e0e1942102537eed7b166d6661ae57835b20a52a2a3d6a3e1bf3 \
--hash=sha256:7d9ec1e6694af39b687045712a8ad14ddcb568670d5eb1b66b48b98b9312afba \
--hash=sha256:85dd6dd9aaae7a176948d8bb62e20e2968588fd787c29c5d0d964ab475168d3d \
--hash=sha256:8b9f153208d74ccfa25449a0c6cb756ab792ce0dc99d9d771d935f039b38740c \
--hash=sha256:8c791f4c203ccdbcda588ea4c8a6e4353e10435ea48ddd3d8734a26fe9714cba \
--hash=sha256:970661ece2029915b8f7f70892e88404340fbdefd64728380cad41c8dce14ff4 \
--hash=sha256:9cdc4e898d3b1547d018829fd4a9f403e52e51bba24be0fbfa37f3174e1ef797 \
--hash=sha256:9dc4493aa3d87591e3d2bf1453e25b98038c839ca8e499df3d7106631b66fe83 \
--hash=sha256:a69c28d85bb7cf557751a5214cb3f657b2b035c8c96d71080c1253b75b79b69b \
--hash=sha256:aeac590cce44e68ee8ad0b8ecf4d7bf15801f102d564ca1b0eb1f12f584ee656 \
--hash=sha256:be11fce0e6af6c0e8d93c10ef17b25aa7c4acb7ec644bff2596c0d639c49e20f \
--hash=sha256:cbbf83914b9a883ab324f728de869f4e406e0cbcd92df7e0a88decf6f9ab7d5a \
--hash=sha256:cfa614d049667bed1c737435c609c0956c5dc0dbafdc1145ee7935e4658582cb \
--hash=sha256:d18fb0f6c8169d26044128a2e7d3c39377a8a151c564e87b875d379dbafd3930 \
--hash=sha256:d80f6236b57a95eb19d5e47eb68d0296119e1eff6deaa2971ab8abe3af918420 \
--hash=sha256:da7912ae76e1df6a1fb841b619110b1be4c86dfb36699d7fd2f177105cdea885 \
--hash=sha256:df6593e150d13cfcce69b0aec5df7bc248cb91e4258a7374c129bb6d56b4e5ca \
--hash=sha256:f70726b60009433111fe9928f5d89cbb18962411d33c45fb19eb81b9bbd26fcd
# via
# certbot
# zope-component
# The following packages are considered to be unsafe in a requirements file:
setuptools==65.5.0 \
--hash=sha256:512e5536220e38146176efb833d4a62aa726b7bbff82cfbc8ba9eaa3996e0b17 \
--hash=sha256:f62ea9da9ed6289bfe868cd6845968a2c854d1427f8548d52cae02a42b4f0356
# via
# acme
# certbot
# josepy
# kubernetes
# zope-component
# zope-event
# zope-hookable
# zope-interface

View File

@ -1,28 +0,0 @@
#!/usr/bin/env python
"""
Generates a Lua table of fingerprints.
One can then add, turn off or delete fingerprints from lua.
"""
def make_lua_table(obj):
"""
Generates table. Fingerprints don't contain any special chars
so they don't need to be escaped. The output may be
sorted but it is not required.
"""
fp = obj["fingerprints"]
print("sqlifingerprints = {")
for f in fp:
print(' ["{0}"]=true,'.format(f))
print("}")
return 0
if __name__ == "__main__":
import sys
import json
with open("../c/sqlparse_data.json", "r") as fd:
make_lua_table(json.load(fd))
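For reference, a minimal sketch of what this generator emits; the sample fingerprint data below is invented (real input comes from ../c/sqlparse_data.json):

# minimal demo of make_lua_table() above; sample data is hypothetical
obj = {"fingerprints": ["1UEnn", "s&1c"]}
make_lua_table(obj)
# prints:
# sqlifingerprints = {
#  ["1UEnn"]=true,
#  ["s&1c"]=true,
# }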

View File

@ -1,121 +0,0 @@
#!/usr/bin/env python
# A 'nullserver' that accepts input and generates output
# to trick sqlmap into thinking it's a database-driven site
#
import sys
import logging
import urllib
import tornado.httpserver
import tornado.ioloop
import tornado.web
import libinjection
class ShutdownHandler(tornado.web.RequestHandler):
def get(self):
global fd
fd.close()
sys.exit(0)
class CountHandler(tornado.web.RequestHandler):
def get(self):
global count
self.write(str(count) + "\n")
def boring(arg):
if arg == "":
return True
if arg == "foo":
return True
if arg == "NULL":
return True
try:
float(arg)
return True
except ValueError:
pass
return False
class NullHandler(tornado.web.RequestHandler):
def get(self):
global fd
global count
params = self.request.arguments.get("id", [])
sqli = False
if len(params) == 0 or (len(params) == 1 and boring(params[0])):
# if no args, or a single value with uninteresting input
# then just exit
self.write("<html><head><title>safe</title></head><body></body></html>")
return
for arg in params:
sqli = libinjection.detectsqli(arg)
if sqli:
break
# we didn't detect it :-(
if not sqli:
count += 1
args = [arg.strip() for arg in params]
# fd.write(' | '.join(args) + "\n")
for arg in args:
extra = {}
sqli = libinjection.detectsqli(arg, extra)
logging.error(
"\t" + arg + "\t" + str(sqli) + "\t" + extra["fingerprint"] + "\n"
)
# for arg in param:
# fd.write(arg + "\n")
# #fd.write(urllib.quote_plus(arg) + "\n")
self.set_status(500)
self.write("<html><head><title>safe</title></head><body></body></html>")
else:
self.write("<html><head><title>sqli</title></head><body></body></html>")
import os
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "yo mama sayz=",
"xsrf_cookies": True,
"gzip": False,
}
application = tornado.web.Application(
[
(r"/null", NullHandler),
(r"/shutdown", ShutdownHandler),
(r"/count", CountHandler),
],
**settings
)
if __name__ == "__main__":
global fd
global count
count = 0
fd = open("./sqlmap-false-negatives.txt", "w")
import tornado.options
# tornado.options.parse_config_file("/etc/server.conf")
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
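Once running, the server is meant to be probed over HTTP (normally by sqlmap itself); a hedged sketch of a manual probe against the endpoints above, assuming the server is listening on localhost:8888 as set by http_server.listen(8888):

# hypothetical manual probe of the null server above
from urllib.request import urlopen
from urllib.parse import quote_plus

resp = urlopen("http://127.0.0.1:8888/null?id=" + quote_plus("1' OR '1'='1"))
print(b"sqli" in resp.read())                          # True when detected
print(urlopen("http://127.0.0.1:8888/count").read())   # false-negative counter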

View File

@ -1,136 +0,0 @@
#!/usr/bin/env python3
"""
Takes testing files and turns them into PHP module tests
"""
import glob
import os
def phpescape(s):
"""
escapes plain text into php-code
"""
return s.replace("\\", "\\\\").replace("$", "\\$")
def readtestdata(filename):
"""
Read a test file and split into components
"""
state = None
info = {"--TEST--": "", "--INPUT--": "", "--EXPECTED--": ""}
for line in open(filename, "r"):
line = line.rstrip()
if line in ("--TEST--", "--INPUT--", "--EXPECTED--"):
state = line
elif state:
info[state] += line + "\n"
# remove last newline from input
info["--INPUT--"] = info["--INPUT--"][0:-1]
return (info["--TEST--"], info["--INPUT--"].strip(), info["--EXPECTED--"].strip())
def gentest_tokens():
"""
generate token phpt test
"""
for testname in sorted(glob.glob("../tests/test-tokens-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
while (libinjection_sqli_tokenize($sqlistate)) {{
echo(print_token(libinjection_sqli_state_current_get($sqlistate)) . "\\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
def gentest_folding():
for testname in sorted(glob.glob("../tests/test-folding-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
$fingerprint = libinjection_sqli_fingerprint($sqlistate, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
for ($i = 0; $i < strlen($fingerprint); $i++) {{
echo(print_token(libinjection_sqli_get_token($sqlistate, $i)) . "\\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
def gentest_fingerprints():
"""
generate phpt for sqli fingerprint tests
"""
for testname in sorted(glob.glob("../tests/test-sqli-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{0}
--DESCRIPTION--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
$ok = libinjection_is_sqli($sqlistate);
if ($ok == 1) {{
echo(libinjection_sqli_state_fingerprint_get($sqlistate) . "\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
if __name__ == "__main__":
gentest_tokens()
gentest_folding()
gentest_fingerprints()
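For context, a sketch of the --TEST--/--INPUT--/--EXPECTED-- layout that readtestdata() above parses; the test content shown is invented:

# hypothetical example of a test data file consumed by readtestdata()
sample = """--TEST--
tokenize a simple union
--INPUT--
1 UNION SELECT
--EXPECTED--
1 1
U UNION
E SELECT"""
with open("test-tokens-example.txt", "w") as fd:
    fd.write(sample)
# readtestdata("test-tokens-example.txt") then returns roughly:
# ("tokenize a simple union\n", "1 UNION SELECT", "1 1\nU UNION\nE SELECT")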

View File

@ -1,55 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to PHP array
"""
def toc(obj):
"""main routine"""
print(
"""<?php
function lookup($state, $stype, $keyword) {
// fixed up from the original pseudo-PHP; assumes the binding exposes
// LOOKUP_FINGERPRINT and libinjection_sqli_not_whitelist(), following the
// naming of the other libinjection_* helpers used in the phpt tests
global $words, $fingerprints;
$keyword = strtoupper($keyword);
if ($stype == LOOKUP_FINGERPRINT) {
if (in_array($keyword, $fingerprints) && libinjection_sqli_not_whitelist($state)) {
return 'F';
} else {
return chr(0);
}
}
return isset($words[$keyword]) ? $words[$keyword] : chr(0);
}
"""
)
words = {}
keywords = obj["keywords"]
for k, v in keywords.items():
words[str(k)] = str(v)
print("$words = array(")
for k in sorted(words.keys()):
print("'{0}' => '{1}',".format(k, words[k]))
print(");\n")
keywords = obj["fingerprints"]
print("$fingerprints = array(")
for k in sorted(keywords):
print("'{0}',".format(k.upper()))
print(");")
return 0
if __name__ == "__main__":
import sys
import json
sys.exit(toc(json.load(sys.stdin)))

View File

@ -1 +0,0 @@
sqli_fingerprints = set(["1234"])

View File

@ -1,50 +0,0 @@
"""
libinjection module for python
Copyright 2012, 2013, 2014 Nick Galbreath
nickg@client9.com
BSD License -- see COPYING.txt for details
"""
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
MODULE = Extension(
"_libinjection",
[
"libinjection/libinjection_wrap.c",
"libinjection/libinjection_sqli.c",
"libinjection/libinjection_html5.c",
"libinjection/libinjection_xss.c",
],
swig_opts=["-Wextra", "-builtin"],
define_macros=[],
include_dirs=[],
libraries=[],
library_dirs=[],
)
setup(
name="libinjection",
version="3.9.1",
description="Wrapper around libinjection c-code to detect sqli",
author="Nick Galbreath",
author_email="nickg@client9.com",
url="https://libinjection.client9.com/",
ext_modules=[MODULE],
packages=["libinjection"],
long_description="""
wrapper around libinjection
""",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Topic :: Database",
"Topic :: Security",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Internet :: Log Analysis",
"Topic :: Internet :: WWW/HTTP",
],
)
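As a usage sketch, assuming the SWIG-generated sources listed above are present and the extension has been built (e.g. python setup.py build_ext --inplace):

# hypothetical smoke test once the extension is built
import libinjection
print(libinjection.detectsqli("1' OR '1'='1"))  # expected: truthy for SQLi
print(libinjection.detectsqli("plain text"))    # expected: falsy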

View File

@ -1,92 +0,0 @@
#!/usr/bin/env python
from libinjection import *
from words import *
import time
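# note: this benchmark predates Python 3 (xrange, time.clock); the star
# imports are expected to supply the words table and the lookup() callback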
def lookup_null(state, style, keyword):
return ""
def lookup_c(state, style, keyword):
return ""
# return sqli_lookup_word(state, style, keyword)
def lookup_upcase(state, stype, keyword):
if stype == libinjection.LOOKUP_FINGERPRINT:
return words.get("0" + keyword.upper(), "")
else:
return words.get(keyword.upper(), "")
def main():
inputs = (
"123 LIKE -1234.5678E+2;",
"APPLE 19.123 'FOO' \"BAR\"",
"/* BAR */ UNION ALL SELECT (2,3,4)",
"1 || COS(+0X04) --FOOBAR",
"dog apple @cat banana bar",
"dog apple cat \"banana 'bar",
"102 TABLE CLOTH",
)
imax = 100000
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python->c TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_null)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_null TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_upcase)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_upcase TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_c)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_c TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup TPS = {0}".format(total))
if __name__ == "__main__":
main()

View File

@ -1,60 +0,0 @@
#!/usr/bin/env python3
"""
Small script to convert fingerprints back to SQL or SQLi
"""
import subprocess
RMAP = {
"1": "1",
"f": "convert",
"&": "and",
"v": "@version",
"n": "aname",
"s": '"1"',
"(": "(",
")": ")",
"o": "*",
"E": "select",
"U": "union",
"k": "JOIN",
"t": "binary",
",": ",",
";": ";",
"c": " -- comment",
"T": "DROP",
":": ":",
"A": "COLLATE",
"B": "group by",
"X": "/* /* nested comment */ */",
}
def fingerprint_to_sqli():
"""
main code, expects to be run in main libinjection/src directory
and hardwires "fingerprints.txt" as input file
"""
mode = "print"
fingerprints = []
with open("fingerprints.txt", "r") as openfile:
for line in openfile:
fingerprints.append(line.strip())
for fingerprint in fingerprints:
sql = []
for char in fingerprint:
sql.append(RMAP[char])
sqlstr = " ".join(sql)
if mode == "print":
print(fingerprint, " ".join(sql))
else:
args = ["./fptool", "-0", sqlstr]
actualfp = subprocess.check_output(args).strip()
if fingerprint != actualfp:
print(fingerprint, actualfp, " ".join(sql))
if __name__ == "__main__":
fingerprint_to_sqli()
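To make the reverse mapping concrete, a small sketch expanding one fingerprint through a subset of RMAP above (the fingerprint 1UEnn appears in the fuzzer's whitelist below):

# minimal demo of the RMAP expansion
RMAP_SUBSET = {"1": "1", "U": "union", "E": "select", "n": "aname"}
fp = "1UEnn"
print(fp, " ".join(RMAP_SUBSET[c] for c in fp))
# -> 1UEnn 1 union select aname aname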

View File

@ -1,491 +0,0 @@
#!/usr/bin/env python3
# pylint: disable=C0103,R0911,R0912,R0915
# disable short-variable-names, too many branches, returns, statements
"""
fingerprint fuzzer and generator
Given a fingerprint, this generates other similar fingerprints
that are functionally equivalent for SQLi detection
"""
import sys
class PermuteFingerprints(object):
"""class to mutate / fuzz fingerprints to find new SQLi"""
def __init__(self):
"""initialization"""
self.fingerprints = set()
self.blacklist = set(
[
"E1n",
"sns",
"1&n",
"s1s",
"1n1",
"1o1",
"1os",
"sn1",
"sonc",
"so1",
"n&n",
"son",
"nov",
"n&s",
"E1s",
"nos",
"nkn&n",
"1sn",
"n&nkn",
"s1n",
"n&nEn",
"s&sn",
"1os1o",
"sU",
"nU",
"n,(n)",
"n&n&n",
"Enkn",
"nk1;",
"1os1o",
"1n1;",
"s*1s",
"1s1",
"nknEn",
"n&sn",
"so1",
"nkn;",
"n&n;",
"von",
"n&nc",
"sonkn",
"n)o1",
"Enn;",
"nBn",
"Ennc",
"n&En",
"nEnEn",
"Esn",
"n1s",
"n(1)s",
"En1",
"En(1)",
"n(1)n",
"n1v",
"n(1)1",
"n&EUE",
"n&EkU",
"s&EUE",
"s&EkU",
"v&EUE",
"v&EkU",
"n&nTn",
"nA",
"nos;n",
"UEn",
"so1no",
"1)on",
"1k(1)",
"s)on",
"1;TnE",
"s&1s",
"n)c",
"svs",
"1n(1)",
"so1s(",
"son1s",
"nf(1n",
"so1sf",
"son1s",
"nf(n)",
"En1c",
"n)on",
"nok&n",
"n;Tkn",
"nEnc",
"nok&1",
"nok&f",
"nok&s",
"nok&v",
"nk(n)",
"nknc",
"son1n",
"n&nBn",
]
)
self.whitelist = set(["T(vv)", "Tnvos", "Tnv;", "1UEnn", "1;Tvk"])
def aslist(self):
"""
return the fingerprints as a sorted list
"""
return sorted(list(self.fingerprints))
def insert(self, fingerprint):
"""
insert a new fingerprint, with possible variations
"""
if len(fingerprint) > 5:
fingerprint = fingerprint[0:5]
if self.validate(fingerprint):
self.fingerprints.add(fingerprint)
def validate(self, s):
"""
determines whether a fingerprint could be used as SQLi
"""
if len(s) == 0:
return False
if s in self.whitelist:
return True
if s in self.blacklist:
return False
# SQL Types are rarely used
if "t" in s and "f(t" not in s and "At" not in s:
return False
if "Un" in s:
return False
if "1nf" in s:
return False
if "s1o" in s:
return False
if "oo" in s:
return False
if "v,s" in s:
return False
if "s,v" in s:
return False
if "v,v" in s:
return False
if "v,1" in s:
return False
if "v,n" in s:
return False
if "n,v" in s:
return False
if "1,v" in s:
return False
if "Eo(" in s:
return False
if "(o(" in s:
return False
if "(o1" in s:
return False
if "(on" in s:
return False
if "(os" in s:
return False
if "(of" in s:
return False
if "(ov" in s:
return False
if "B(n)" in s:
return False
if "oso" in s:
return False
if "o1o" in s:
return False
if "ono" in s:
return False
# only 1 special case for this
# 1;foo:goto foo
# 1;n:k
# the 'foo' can only be a 'n' type
if ":" in s and not "n:" in s:
return False
if "11" in s:
return False
if "))" in s:
return False
if "((" in s:
return False
if "v1" in s:
return False
if "nv" in s and "T" not in s:
return False
if "nn" in s and "T" not in s:
return False
# select @version foo is legit
# but unlikely anywhere else
if "vn" in s and "Evn" not in s:
return False
if "oE" in s:
return False
if "A1" in s:
return False
if "An" in s:
return False
if "A(1" in s:
return False
if "vov" in s:
return False
if "vo1" in s:
return False
if "von" in s:
return False
if "ns" in s:
if "U" in s:
return True
if "T" in s:
return True
return False
if "sn" in s:
# that is... Tsn is ok
if s.find("T") != -1 and s.find("T") < s.find("sn"):
return True
return False
# select foo (as) bar is the only nn type i know
if "nn" in s and "Enn" not in s and "T" not in s:
return False
if ",o" in s:
return False
if "kk" in s and "Tkk" not in s:
return False
if "ss" in s:
return False
if "ff" in s:
return False
if "1no" in s:
return False
if "kno" in s:
return False
if "nEk" in s:
return False
if "n(n" in s:
return False
if "1so" in s:
return False
if "1s1" in s:
return False
if "noo" in s:
return False
if "ooo" in s:
return False
if "vvv" in s:
return False
if "1vn" in s:
return False
if "1n1" in s:
return False
if "&1n" in s:
return False
if "&1v" in s:
return False
if "&1s" in s:
return False
if "nnk" in s:
return False
if "n1f" in s:
return False
# folded away
if s.startswith("("):
return False
if "&o" in s:
return False
if "1,1" in s:
return False
if "1,s" in s:
return False
if "1,n" in s:
return False
if "s,1" in s:
return False
if "s,s" in s:
return False
if "s,n" in s:
return False
if "n,1" in s:
return False
if "n,s" in s:
return False
if "n,n" in s:
return False
if "1o1" in s:
return False
if "1on" in s:
return False
if "no1" in s:
return False
if "non" in s:
return False
if "1(v" in s:
return False
if "1(n" in s:
return False
if "1(s" in s:
return False
if "1(1" in s:
return False
if "s(s" in s:
return False
if "s(n" in s:
return False
if "s(1" in s:
return False
if "s(v" in s:
return False
if "v(s" in s:
return False
if "v(n" in s:
return False
if "v(1" in s and "Tv(1" not in s:
return False
if "v(v" in s:
return False
if "TTT" in s:
return False
if s.startswith("n("):
return False
if s.startswith("vs"):
return False
if s.startswith("o"):
return False
if ")(" in s:
return False
# need to investigate T(vv) to see
# if it's correct
if "vv" in s and s != "T(vv)":
return False
# unlikely to be sqli but causes FPs
if s in ("so1n)", "sonoE"):
return False
return True
def permute(self, fp):
"""
generate alternative (possibly invalid) fingerprints
"""
self.insert(fp)
# do this for safety
if len(fp) > 1 and len(fp) < 5 and fp[-1] != ";" and fp[-1] != "c":
self.insert(fp + ";")
self.insert(fp + ";c")
# do this for safety
if len(fp) > 1 and len(fp) < 5 and fp[-1] != "c":
self.insert(fp + "c")
for i in range(len(fp)):
if fp[i] == "1":
self.insert(fp[0:i] + "n" + fp[i + 1 :])
self.insert(fp[0:i] + "v" + fp[i + 1 :])
self.insert(fp[0:i] + "s" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "1os" + fp[i + 1 :])
self.insert(fp[0:i] + "1ov" + fp[i + 1 :])
self.insert(fp[0:i] + "1on" + fp[i + 1 :])
self.insert(fp[0:i] + "(1)" + fp[i + 1 :])
elif fp[i] == "s":
self.insert(fp[0:i] + "v" + fp[i + 1 :])
self.insert(fp[0:i] + "1" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "so1" + fp[i + 1 :])
self.insert(fp[0:i] + "sov" + fp[i + 1 :])
self.insert(fp[0:i] + "son" + fp[i + 1 :])
self.insert(fp[0:i] + "(s)" + fp[i + 1 :])
elif fp[i] == "v":
self.insert(fp[0:i] + "s" + fp[i + 1 :])
self.insert(fp[0:i] + "1" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "vo1" + fp[i + 1 :])
self.insert(fp[0:i] + "vos" + fp[i + 1 :])
self.insert(fp[0:i] + "von" + fp[i + 1 :])
self.insert(fp[0:i] + "(v)" + fp[i + 1 :])
elif fp[i] == "E":
# Select top, select distinct, case when
self.insert(fp[0:i] + "Ek" + fp[i + 1 :])
elif fp[i] == ")":
self.insert(fp[0:i] + "))" + fp[i + 1 :])
self.insert(fp[0:i] + ")))" + fp[i + 1 :])
self.insert(fp[0:i] + "))))" + fp[i + 1 :])
if ";E" in fp:
self.insert(fp.replace(";E", ";T"))
if fp.startswith("T"):
self.insert("1;" + fp)
self.insert("1);" + fp)
if "At" in fp:
self.insert(fp.replace("At", "As"))
if "(" in fp:
done = False
parts = []
for char in fp:
if char == "(" and done is False:
parts.append(char)
done = True
parts.append(char)
newline = "".join(parts)
self.insert(newline)
done = False
parts = []
for char in fp:
if char == "(":
if done is True:
parts.append(char)
else:
done = True
parts.append(char)
newline = "".join(parts)
self.insert(newline)
done = False
parts = []
for char in fp:
if char == "(":
parts.append(char)
parts.append(char)
newline = "".join(parts)
self.insert(newline)
def main():
"""main entrance"""
mutator = PermuteFingerprints()
for line in sys.stdin:
mutator.permute(line.strip())
for fingerprint in mutator.aslist():
print(fingerprint)
if __name__ == "__main__":
main()
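Driven directly rather than via stdin, the class above behaves roughly like this:

# hedged sketch of direct use of PermuteFingerprints
mutator = PermuteFingerprints()
mutator.permute("1UEnn")      # a whitelisted fingerprint from above
for fp in mutator.aslist():   # "1UEnn" plus whichever variants pass validate()
    print(fp)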

View File

@ -1,136 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to a C header (.h) file
"""
import sys
def toc(obj):
"""main routine"""
print(
"""
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H
#include "libinjection.h"
#include "libinjection_sqli.h"
typedef struct {
const char *word;
char type;
} keyword_t;
static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
static size_t parse_underscore(sfilter * sf); /* referenced by CHAR_UNDERSCORE below */
"""
)
#
# Mapping of character to function
#
fnmap = {
"CHAR_WORD": "parse_word",
"CHAR_WHITE": "parse_white",
"CHAR_OP1": "parse_operator1",
"CHAR_UNARY": "parse_operator1",
"CHAR_OP2": "parse_operator2",
"CHAR_BANG": "parse_operator2",
"CHAR_BACK": "parse_backslash",
"CHAR_DASH": "parse_dash",
"CHAR_STR": "parse_string",
"CHAR_HASH": "parse_hash",
"CHAR_NUM": "parse_number",
"CHAR_SLASH": "parse_slash",
"CHAR_SEMICOLON": "parse_char",
"CHAR_COMMA": "parse_char",
"CHAR_LEFTPARENS": "parse_char",
"CHAR_RIGHTPARENS": "parse_char",
"CHAR_LEFTBRACE": "parse_char",
"CHAR_RIGHTBRACE": "parse_char",
"CHAR_VAR": "parse_var",
"CHAR_OTHER": "parse_other",
"CHAR_MONEY": "parse_money",
"CHAR_TICK": "parse_tick",
"CHAR_UNDERSCORE": "parse_underscore",
"CHAR_USTRING": "parse_ustring",
"CHAR_QSTRING": "parse_qstring",
"CHAR_NQSTRING": "parse_nqstring",
"CHAR_XSTRING": "parse_xstring",
"CHAR_BSTRING": "parse_bstring",
"CHAR_ESTRING": "parse_estring",
"CHAR_BWORD": "parse_bword",
}
print()
print("typedef size_t (*pt2Function)(sfilter *sf);")
print("static const pt2Function char_parse_map[] = {")
pos = 0
for character in obj["charmap"]:
print(" &%s, /* %d */" % (fnmap[character], pos))
pos += 1
print("};")
print()
# keywords
# load them
keywords = obj["keywords"]
for fingerprint in list(obj["fingerprints"]):
fingerprint = "0" + fingerprint.upper()
keywords[fingerprint] = "F"
needhelp = []
for key in keywords.keys():
if key != key.upper():
needhelp.append(key)
for key in needhelp:
tmpv = keywords[key]
del keywords[key]
keywords[key.upper()] = tmpv
print("static const keyword_t sql_keywords[] = {")
for k in sorted(keywords.keys()):
if len(k) > 31:
sys.stderr.write("ERROR: keyword greater than 32 chars\n")
sys.exit(1)
print(" {\"%s\", '%s'}," % (k, keywords[k]))
print("};")
print("static const size_t sql_keywords_sz = %d;" % (len(keywords),))
print("#endif")
return 0
if __name__ == "__main__":
import json
sys.exit(toc(json.load(sys.stdin)))
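A minimal direct invocation of toc() above, bypassing stdin; the sample object is invented but follows the keys the function reads (charmap, keywords, fingerprints):

# hypothetical sample input for toc()
sample = {
    "charmap": ["CHAR_WORD", "CHAR_WHITE"],   # subset for illustration
    "keywords": {"select": "E", "union": "U"},
    "fingerprints": ["1UEnn"],
}
toc(sample)   # prints the generated .h content to stdout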

File diff suppressed because it is too large Load Diff

View File

@ -1,28 +0,0 @@
#!/usr/bin/env python
"""
Generates a Lua table of fingerprints.
One can then add, turn off or delete fingerprints from lua.
"""
def make_lua_table(obj):
"""
Generates table. Fingerprints don't contain any special chars
so they don't need to be escaped. The output may be
sorted but it is not required.
"""
fp = obj["fingerprints"]
print("sqlifingerprints = {")
for f in fp:
print(' ["{0}"]=true,'.format(f))
print("}")
return 0
if __name__ == "__main__":
import sys
import json
with open("../c/sqlparse_data.json", "r") as fd:
make_lua_table(json.load(fd))

View File

@ -1,121 +0,0 @@
#!/usr/bin/env python
# A 'nullserver' that accepts input and generates output
# to trick sqlmap into thinking it's a database-driven site
#
import sys
import logging
import urllib
import tornado.httpserver
import tornado.ioloop
import tornado.web
import libinjection
class ShutdownHandler(tornado.web.RequestHandler):
def get(self):
global fd
fd.close()
sys.exit(0)
class CountHandler(tornado.web.RequestHandler):
def get(self):
global count
self.write(str(count) + "\n")
def boring(arg):
if arg == "":
return True
if arg == "foo":
return True
if arg == "NULL":
return True
try:
float(arg)
return True
except ValueError:
pass
return False
class NullHandler(tornado.web.RequestHandler):
def get(self):
global fd
global count
params = self.request.arguments.get("id", [])
sqli = False
if len(params) == 0 or (len(params) == 1 and boring(params[0])):
# if no args, or a single value with uninteresting input
# then just exit
self.write("<html><head><title>safe</title></head><body></body></html>")
return
for arg in params:
sqli = libinjection.detectsqli(arg)
if sqli:
break
# we didn't detect it :-(
if not sqli:
count += 1
args = [arg.strip() for arg in params]
# fd.write(' | '.join(args) + "\n")
for arg in args:
extra = {}
sqli = libinjection.detectsqli(arg, extra)
logging.error(
"\t" + arg + "\t" + str(sqli) + "\t" + extra["fingerprint"] + "\n"
)
# for arg in param:
# fd.write(arg + "\n")
# #fd.write(urllib.quote_plus(arg) + "\n")
self.set_status(500)
self.write("<html><head><title>safe</title></head><body></body></html>")
else:
self.write("<html><head><title>sqli</title></head><body></body></html>")
import os
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "yo mama sayz=",
"xsrf_cookies": True,
"gzip": False,
}
application = tornado.web.Application(
[
(r"/null", NullHandler),
(r"/shutdown", ShutdownHandler),
(r"/count", CountHandler),
],
**settings
)
if __name__ == "__main__":
global fd
global count
count = 0
fd = open("./sqlmap-false-negatives.txt", "w")
import tornado.options
# tornado.options.parse_config_file("/etc/server.conf")
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()

View File

@ -1,136 +0,0 @@
#!/usr/bin/env python3
"""
Takes testing files and turns them PHP module tests
"""
import glob
import os
def phpescape(s):
"""
escapes plain text into php-code
"""
return s.replace("\\", "\\\\").replace("$", "\\$")
def readtestdata(filename):
"""
Read a test file and split into components
"""
state = None
info = {"--TEST--": "", "--INPUT--": "", "--EXPECTED--": ""}
for line in open(filename, "r"):
line = line.rstrip()
if line in ("--TEST--", "--INPUT--", "--EXPECTED--"):
state = line
elif state:
info[state] += line + "\n"
# remove last newline from input
info["--INPUT--"] = info["--INPUT--"][0:-1]
return (info["--TEST--"], info["--INPUT--"].strip(), info["--EXPECTED--"].strip())
def gentest_tokens():
"""
generate token phpt test
"""
for testname in sorted(glob.glob("../tests/test-tokens-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
while (libinjection_sqli_tokenize($sqlistate)) {{
echo(print_token(libinjection_sqli_state_current_get($sqlistate)) . "\\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
def gentest_folding():
for testname in sorted(glob.glob("../tests/test-folding-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
$fingerprint = libinjection_sqli_fingerprint($sqlistate, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
for ($i = 0; $i < strlen($fingerprint); $i++) {{
echo(print_token(libinjection_sqli_get_token($sqlistate, $i)) . "\\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
def gentest_fingerprints():
"""
generate phpt for testing sqli testing
"""
for testname in sorted(glob.glob("../tests/test-sqli-*.txt")):
data = readtestdata(os.path.join("../tests", testname))
testname = os.path.basename(testname)
phpt = """
--TEST--
{0}
--DESCRIPTION--
{1}
--FILE--
<?php
require(sprintf("%s/../testsupport.php", dirname(__FILE__)));
$sqlistate = new_libinjection_sqli_state();
$s = <<<EOT
{2}
EOT;
$s = trim($s);
libinjection_sqli_init($sqlistate, $s, FLAG_QUOTE_NONE | FLAG_SQL_ANSI);
$ok = libinjection_is_sqli($sqlistate);
if ($ok == 1) {{
echo(libinjection_sqli_state_fingerprint_get($sqlistate) . "\n");
}}
--EXPECT--
{3}
"""
phpt = phpt.format(testname, data[0], phpescape(data[1]), data[2])
with open("build/tests/" + testname.replace(".txt", ".phpt"), "w") as fd:
fd.write(phpt.strip())
if __name__ == "__main__":
gentest_tokens()
gentest_folding()
gentest_fingerprints()

View File

@ -1,55 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to PHP array
"""
def toc(obj):
"""main routine"""
print(
"""<?php
function lookup($state, $stype, $keyword) {
$keyword = struper(keyword);
if ($stype == libinjection.LOOKUP_FINGERPRINT) {
if ($keyword == $fingerprints && libinjection.sqli_not_whitelist($state)) {
return 'F';
} else {
return chr(0);
}
}
return $words.get(keyword, chr(0));
}
"""
)
words = {}
keywords = obj["keywords"]
for k, v in keywords.items():
words[str(k)] = str(v)
print("$words = array(")
for k in sorted(words.keys()):
print("'{0}' => '{1}',".format(k, words[k]))
print(");\n")
keywords = obj["fingerprints"]
print("$fingerprints = array(")
for k in sorted(keywords):
print("'{0}',".format(k.upper()))
print(");")
return 0
if __name__ == "__main__":
import sys
import json
sys.exit(toc(json.load(sys.stdin)))

View File

@ -1 +0,0 @@
sqli_fingerprints = set(["1234"])

View File

@ -1,50 +0,0 @@
"""
libinjection module for python
Copyright 2012, 2013, 2014 Nick Galbreath
nickg@client9.com
BSD License -- see COPYING.txt for details
"""
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
MODULE = Extension(
"_libinjection",
[
"libinjection/libinjection_wrap.c",
"libinjection/libinjection_sqli.c",
"libinjection/libinjection_html5.c",
"libinjection/libinjection_xss.c",
],
swig_opts=["-Wextra", "-builtin"],
define_macros=[],
include_dirs=[],
libraries=[],
library_dirs=[],
)
setup(
name="libinjection",
version="3.9.1",
description="Wrapper around libinjection c-code to detect sqli",
author="Nick Galbreath",
author_email="nickg@client9.com",
url="https://libinjection.client9.com/",
ext_modules=[MODULE],
packages=["libinjection"],
long_description="""
wrapper around libinjection
""",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Topic :: Database",
"Topic :: Security",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Internet :: Log Analysis",
"Topic :: Internet :: WWW/HTTP",
],
)

View File

@ -1,92 +0,0 @@
#!/usr/bin/env python
from libinjection import *
from words import *
import time
def lookup_null(state, style, keyword):
return ""
def lookup_c(state, style, keyword):
return ""
# return sqli_lookup_word(state, style, keyword)
def lookup_upcase(state, stype, keyword):
if stype == libinjection.LOOKUP_FINGERPRINT:
return words.get("0" + keyword.upper(), "")
else:
return words.get(keyword.upper(), "")
def main():
inputs = (
"123 LIKE -1234.5678E+2;",
"APPLE 19.123 'FOO' \"BAR\"",
"/* BAR */ UNION ALL SELECT (2,3,4)",
"1 || COS(+0X04) --FOOBAR",
"dog apple @cat banana bar",
"dog apple cat \"banana 'bar",
"102 TABLE CLOTH",
)
imax = 100000
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python->c TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_null)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_null TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_upcase)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_upcase TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup_c)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup_c TPS = {0}".format(total))
t0 = time.clock()
sfilter = sqli_state()
for i in xrange(imax):
s = inputs[i % 7]
sqli_init(sfilter, s, 0)
sqli_callback(sfilter, lookup)
is_sqli(sfilter)
t1 = time.clock()
total = imax / (t1 - t0)
print("python lookup TPS = {0}".format(total))
if __name__ == "__main__":
main()

View File

@ -1,60 +0,0 @@
#!/usr/bin/env python3
"""
Small script to convert fingerprints back to SQL or SQLi
"""
import subprocess
RMAP = {
"1": "1",
"f": "convert",
"&": "and",
"v": "@version",
"n": "aname",
"s": '"1"',
"(": "(",
")": ")",
"o": "*",
"E": "select",
"U": "union",
"k": "JOIN",
"t": "binary",
",": ",",
";": ";",
"c": " -- comment",
"T": "DROP",
":": ":",
"A": "COLLATE",
"B": "group by",
"X": "/* /* nested comment */ */",
}
def fingerprint_to_sqli():
"""
main code, expects to be run in main libinjection/src directory
and hardwires "fingerprints.txt" as input file
"""
mode = "print"
fingerprints = []
with open("fingerprints.txt", "r") as openfile:
for line in openfile:
fingerprints.append(line.strip())
for fingerprint in fingerprints:
sql = []
for char in fingerprint:
sql.append(RMAP[char])
sqlstr = " ".join(sql)
if mode == "print":
print(fingerprint, " ".join(sql))
else:
args = ["./fptool", "-0", sqlstr]
actualfp = subprocess.check_output(args).strip()
if fingerprint != actualfp:
print(fingerprint, actualfp, " ".join(sql))
if __name__ == "__main__":
fingerprint_to_sqli()

View File

@ -1,491 +0,0 @@
#!/usr/bin/env python3
# pylint: disable=C0103,R0911,R0912,R0915
# disable short-variable-names, too many branches, returns, statements
"""
fingerprint fuzzer and generator
Given a fingerprint, this generates other similar fingerprints
that are functionally equivalent for SQLi detection
"""
import sys
class PermuteFingerprints(object):
"""class to mutate / fuzz fingerprints to find new SQLi"""
def __init__(self):
"""initialization"""
self.fingerprints = set()
self.blacklist = set(
[
"E1n",
"sns",
"1&n",
"s1s",
"1n1",
"1o1",
"1os",
"sn1",
"sonc",
"so1",
"n&n",
"son",
"nov",
"n&s",
"E1s",
"nos",
"nkn&n",
"1sn",
"n&nkn",
"s1n",
"n&nEn",
"s&sn",
"1os1o",
"sU",
"nU",
"n,(n)",
"n&n&n",
"Enkn",
"nk1;",
"1os1o",
"1n1;",
"s*1s",
"1s1",
"nknEn",
"n&sn",
"so1",
"nkn;",
"n&n;",
"von",
"n&nc",
"sonkn",
"n)o1",
"Enn;",
"nBn",
"Ennc",
"n&En",
"nEnEn",
"Esn",
"n1s",
"n(1)s",
"En1",
"En(1)",
"n(1)n",
"n1v",
"n(1)1",
"n&EUE",
"n&EkU",
"s&EUE",
"s&EkU",
"v&EUE",
"v&EkU",
"n&nTn",
"nA",
"nos;n",
"UEn",
"so1no",
"1)on",
"1k(1)",
"s)on",
"1;TnE",
"s&1s",
"n)c",
"svs",
"1n(1)",
"so1s(",
"son1s",
"nf(1n",
"so1sf",
"son1s",
"nf(n)",
"En1c",
"n)on",
"nok&n",
"n;Tkn",
"nEnc",
"nok&1",
"nok&f",
"nok&s",
"nok&v",
"nk(n)",
"nknc",
"son1n",
"n&nBn",
]
)
self.whitelist = set(["T(vv)", "Tnvos", "Tnv;", "1UEnn", "1;Tvk"])
def aslist(self):
"""
return the fingerprints as a sorted list
"""
return sorted(list(self.fingerprints))
def insert(self, fingerprint):
"""
insert a new fingerprint, with possible variations
"""
if len(fingerprint) > 5:
fingerprint = fingerprint[0:5]
if self.validate(fingerprint):
self.fingerprints.add(fingerprint)
def validate(self, s):
"""
detemines if a fingerprint could be used a SQLi
"""
if len(s) == 0:
return False
if s in self.whitelist:
return True
if s in self.blacklist:
return False
# SQL Types are rarely used
if "t" in s and "f(t" not in s and "At" not in s:
return False
if "Un" in s:
return False
if "1nf" in s:
return False
if "s1o" in s:
return False
if "oo" in s:
return False
if "v,s" in s:
return False
if "s,v" in s:
return False
if "v,v" in s:
return False
if "v,1" in s:
return False
if "v,n" in s:
return False
if "n,v" in s:
return False
if "1,v" in s:
return False
if "Eo(" in s:
return False
if "(o(" in s:
return False
if "(o1" in s:
return False
if "(on" in s:
return False
if "(os" in s:
return False
if "(of" in s:
return False
if "(ov" in s:
return False
if "B(n)" in s:
return False
if "oso" in s:
return False
if "o1o" in s:
return False
if "ono" in s:
return False
# only 1 special case for this
# 1;foo:goto foo
# 1;n:k
# the 'foo' can only be a 'n' type
if ":" in s and not "n:" in s:
return False
if "11" in s:
return False
if "))" in s:
return False
if "((" in s:
return False
if "v1" in s:
return False
if "nv" in s and "T" not in s:
return False
if "nn" in s and "T" not in s:
return False
# select @version foo is legit
# but unlikely anywhere else
if "vn" in s and "Evn" not in s:
return False
if "oE" in s:
return False
if "A1" in s:
return False
if "An" in s:
return False
if "A(1" in s:
return False
if "vov" in s:
return False
if "vo1" in s:
return False
if "von" in s:
return False
if "ns" in s:
if "U" in s:
return True
if "T" in s:
return True
return False
if "sn" in s:
# that is... Tsn is ok
if s.find("T") != -1 and s.find("T") < s.find("sn"):
return True
return False
# select foo (as) bar is only nn type i know
if "nn" in s and "Enn" not in s and "T" not in s:
return False
if ",o" in s:
return False
if "kk" in s and "Tkk" not in s:
return False
if "ss" in s:
return False
if "ff" in s:
return False
if "1no" in s:
return False
if "kno" in s:
return False
if "nEk" in s:
return False
if "n(n" in s:
return False
if "1so" in s:
return False
if "1s1" in s:
return False
if "noo" in s:
return False
if "ooo" in s:
return False
if "vvv" in s:
return False
if "1vn" in s:
return False
if "1n1" in s:
return False
if "&1n" in s:
return False
if "&1v" in s:
return False
if "&1s" in s:
return False
if "nnk" in s:
return False
if "n1f" in s:
return False
# folded away
if s.startswith("("):
return False
if "&o" in s:
return False
if "1,1" in s:
return False
if "1,s" in s:
return False
if "1,n" in s:
return False
if "s,1" in s:
return False
if "s,s" in s:
return False
if "s,n" in s:
return False
if "n,1" in s:
return False
if "n,s" in s:
return False
if "n,n" in s:
return False
if "1o1" in s:
return False
if "1on" in s:
return False
if "no1" in s:
return False
if "non" in s:
return False
if "1(v" in s:
return False
if "1(n" in s:
return False
if "1(s" in s:
return False
if "1(1" in s:
return False
if "s(s" in s:
return False
if "s(n" in s:
return False
if "s(1" in s:
return False
if "s(v" in s:
return False
if "v(s" in s:
return False
if "v(n" in s:
return False
if "v(1" in s and "Tv(1" not in s:
return False
if "v(v" in s:
return False
if "TTT" in s:
return False
if s.startswith("n("):
return False
if s.startswith("vs"):
return False
if s.startswith("o"):
return False
if ")(" in s:
return False
# need to investigate T(vv) to see
# if it's correct
if "vv" in s and s != "T(vv)":
return False
# unlikely to be SQLi but causes FPs
if s in ("so1n)", "sonoE"):
return False
return True
def permute(self, fp):
"""
generate alternative (possibly invalid) fingerprints
"""
self.insert(fp)
# do this for safety
if len(fp) > 1 and len(fp) < 5 and fp[-1] != ";" and fp[-1] != "c":
self.insert(fp + ";")
self.insert(fp + ";c")
# do this for safety
if len(fp) > 1 and len(fp) < 5 and fp[-1] != "c":
self.insert(fp + "c")
for i in range(len(fp)):
if fp[i] == "1":
self.insert(fp[0:i] + "n" + fp[i + 1 :])
self.insert(fp[0:i] + "v" + fp[i + 1 :])
self.insert(fp[0:i] + "s" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "1os" + fp[i + 1 :])
self.insert(fp[0:i] + "1ov" + fp[i + 1 :])
self.insert(fp[0:i] + "1on" + fp[i + 1 :])
self.insert(fp[0:i] + "(1)" + fp[i + 1 :])
elif fp[i] == "s":
self.insert(fp[0:i] + "v" + fp[i + 1 :])
self.insert(fp[0:i] + "1" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "so1" + fp[i + 1 :])
self.insert(fp[0:i] + "sov" + fp[i + 1 :])
self.insert(fp[0:i] + "son" + fp[i + 1 :])
self.insert(fp[0:i] + "(s)" + fp[i + 1 :])
elif fp[i] == "v":
self.insert(fp[0:i] + "s" + fp[i + 1 :])
self.insert(fp[0:i] + "1" + fp[i + 1 :])
self.insert(fp[0:i] + "f(1)" + fp[i + 1 :])
self.insert(fp[0:i] + "f()" + fp[i + 1 :])
self.insert(fp[0:i] + "vo1" + fp[i + 1 :])
self.insert(fp[0:i] + "vos" + fp[i + 1 :])
self.insert(fp[0:i] + "von" + fp[i + 1 :])
self.insert(fp[0:i] + "(v)" + fp[i + 1 :])
elif fp[i] == "E":
# Select top, select distinct, case when
self.insert(fp[0:i] + "Ek" + fp[i + 1 :])
elif fp[i] == ")":
self.insert(fp[0:i] + "))" + fp[i + 1 :])
self.insert(fp[0:i] + ")))" + fp[i + 1 :])
self.insert(fp[0:i] + "))))" + fp[i + 1 :])
if ";E" in fp:
self.insert(fp.replace(";E", ";T"))
if fp.startswith("T"):
self.insert("1;" + fp)
self.insert("1);" + fp)
if "At" in fp:
self.insert(fp.replace("At", "As"))
if "(" in fp:
done = False
parts = []
for char in fp:
if char == "(" and done is False:
parts.append(char)
done = True
parts.append(char)
newline = "".join(parts)
self.insert(newline)
done = False
parts = []
for char in fp:
if char == "(":
if done is True:
parts.append(char)
else:
done = True
parts.append(char)
newline = "".join(parts)
self.insert(newline)
done = False
parts = []
for char in fp:
if char == "(":
parts.append(char)
parts.append(char)
newline = "".join(parts)
self.insert(newline)
def main():
"""main entrance"""
mutator = PermuteFingerprints()
for line in sys.stdin:
mutator.permute(line.strip())
for fingerprint in mutator.aslist():
print(fingerprint)
if __name__ == "__main__":
main()
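# Usage sketch (editor's note; the filename is hypothetical): feed seed
# fingerprints on stdin, one per line, and the expanded set is printed sorted:
#   $ printf '1UE\ns&1s\n' | python3 permute_fingerprints.py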

View File

@ -1,136 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to a C header (.h) file
"""
import sys
def toc(obj):
"""main routine"""
print(
"""
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H
#include "libinjection.h"
#include "libinjection_sqli.h"
typedef struct {
const char *word;
char type;
} keyword_t;
static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
"""
)
#
# Mapping of character to function
#
fnmap = {
"CHAR_WORD": "parse_word",
"CHAR_WHITE": "parse_white",
"CHAR_OP1": "parse_operator1",
"CHAR_UNARY": "parse_operator1",
"CHAR_OP2": "parse_operator2",
"CHAR_BANG": "parse_operator2",
"CHAR_BACK": "parse_backslash",
"CHAR_DASH": "parse_dash",
"CHAR_STR": "parse_string",
"CHAR_HASH": "parse_hash",
"CHAR_NUM": "parse_number",
"CHAR_SLASH": "parse_slash",
"CHAR_SEMICOLON": "parse_char",
"CHAR_COMMA": "parse_char",
"CHAR_LEFTPARENS": "parse_char",
"CHAR_RIGHTPARENS": "parse_char",
"CHAR_LEFTBRACE": "parse_char",
"CHAR_RIGHTBRACE": "parse_char",
"CHAR_VAR": "parse_var",
"CHAR_OTHER": "parse_other",
"CHAR_MONEY": "parse_money",
"CHAR_TICK": "parse_tick",
"CHAR_UNDERSCORE": "parse_underscore",
"CHAR_USTRING": "parse_ustring",
"CHAR_QSTRING": "parse_qstring",
"CHAR_NQSTRING": "parse_nqstring",
"CHAR_XSTRING": "parse_xstring",
"CHAR_BSTRING": "parse_bstring",
"CHAR_ESTRING": "parse_estring",
"CHAR_BWORD": "parse_bword",
}
print()
print("typedef size_t (*pt2Function)(sfilter *sf);")
print("static const pt2Function char_parse_map[] = {")
pos = 0
for character in obj["charmap"]:
print(" &%s, /* %d */" % (fnmap[character], pos))
pos += 1
print("};")
print()
# keywords
# load them
keywords = obj["keywords"]
for fingerprint in list(obj["fingerprints"]):
fingerprint = "0" + fingerprint.upper()
keywords[fingerprint] = "F"
needhelp = []
for key in keywords.keys():
if key != key.upper():
needhelp.append(key)
for key in needhelp:
tmpv = keywords[key]
del keywords[key]
keywords[key.upper()] = tmpv
print("static const keyword_t sql_keywords[] = {")
for k in sorted(keywords.keys()):
if len(k) > 31:
sys.stderr.write("ERROR: keyword greater than 32 chars\n")
sys.exit(1)
print(" {\"%s\", '%s'}," % (k, keywords[k]))
print("};")
print("static const size_t sql_keywords_sz = %d;" % (len(keywords),))
print("#endif")
return 0
if __name__ == "__main__":
import json
sys.exit(toc(json.load(sys.stdin)))
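# Usage sketch (both filenames are hypothetical): regenerate the C header from
# the libinjection JSON data file:
#   $ python3 json2c.py < sqlparse_map.json > libinjection_sqli_data.h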

File diff suppressed because it is too large

View File

@ -1,341 +0,0 @@
local ffi = require("ffi")
local bit = require("bit")
local MMDB_MODE_MMAP = 1
local MMDB_MODE_MASK = 7
local MMDB_SUCCESS = 0
local MMDB_FILE_OPEN_ERROR = 1
local MMDB_CORRUPT_SEARCH_TREE_ERROR = 2
local MMDB_INVALID_METADATA_ERROR = 3
local MMDB_IO_ERROR = 4
local MMDB_OUT_OF_MEMORY_ERROR = 5
local MMDB_UNKNOWN_DATABASE_FORMAT_ERROR = 6
local MMDB_INVALID_DATA_ERROR = 7
local MMDB_INVALID_LOOKUP_PATH_ERROR = 8
local MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR = 9
local MMDB_INVALID_NODE_NUMBER_ERROR = 10
local MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR = 11
local DATA_TYPES = {
MMDB_DATA_TYPE_EXTENDED = 0,
MMDB_DATA_TYPE_POINTER = 1,
MMDB_DATA_TYPE_UTF8_STRING = 2,
MMDB_DATA_TYPE_DOUBLE = 3,
MMDB_DATA_TYPE_BYTES = 4,
MMDB_DATA_TYPE_UINT16 = 5,
MMDB_DATA_TYPE_UINT32 = 6,
MMDB_DATA_TYPE_MAP = 7,
MMDB_DATA_TYPE_INT32 = 8,
MMDB_DATA_TYPE_UINT64 = 9,
MMDB_DATA_TYPE_UINT128 = 10,
MMDB_DATA_TYPE_ARRAY = 11,
MMDB_DATA_TYPE_CONTAINER = 12,
MMDB_DATA_TYPE_END_MARKER = 13,
MMDB_DATA_TYPE_BOOLEAN = 14,
MMDB_DATA_TYPE_FLOAT = 15
}
local _list_0
do
local _accum_0 = { }
local _len_0 = 1
for k in pairs(DATA_TYPES) do
_accum_0[_len_0] = k
_len_0 = _len_0 + 1
end
_list_0 = _accum_0
end
for _index_0 = 1, #_list_0 do
local key = _list_0[_index_0]
DATA_TYPES[DATA_TYPES[key]] = key
end
ffi.cdef([[ const char *gai_strerror(int ecode);
typedef unsigned int mmdb_uint128_t __attribute__ ((__mode__(TI)));
typedef struct MMDB_entry_s {
const struct MMDB_s *mmdb;
uint32_t offset;
} MMDB_entry_s;
typedef struct MMDB_lookup_result_s {
bool found_entry;
MMDB_entry_s entry;
uint16_t netmask;
} MMDB_lookup_result_s;
typedef struct MMDB_entry_data_s {
bool has_data;
union {
uint32_t pointer;
const char *utf8_string;
double double_value;
const uint8_t *bytes;
uint16_t uint16;
uint32_t uint32;
int32_t int32;
uint64_t uint64;
mmdb_uint128_t uint128;
bool boolean;
float float_value;
};
/* This is a 0 if a given entry cannot be found. This can only happen
* when a call to MMDB_(v)get_value() asks for hash keys or array
* indices that don't exist. */
uint32_t offset;
/* This is the next entry in the data section, but it's really only
* relevant for entries that part of a larger map or array
* struct. There's no good reason for an end user to look at this
* directly. */
uint32_t offset_to_next;
/* This is only valid for strings, utf8_strings or binary data */
uint32_t data_size;
/* This is an MMDB_DATA_TYPE_* constant */
uint32_t type;
} MMDB_entry_data_s;
typedef struct MMDB_entry_data_list_s {
MMDB_entry_data_s entry_data;
struct MMDB_entry_data_list_s *next;
void *pool;
} MMDB_entry_data_list_s;
typedef struct MMDB_description_s {
const char *language;
const char *description;
} MMDB_description_s;
typedef struct MMDB_metadata_s {
uint32_t node_count;
uint16_t record_size;
uint16_t ip_version;
const char *database_type;
struct {
size_t count;
const char **names;
} languages;
uint16_t binary_format_major_version;
uint16_t binary_format_minor_version;
uint64_t build_epoch;
struct {
size_t count;
MMDB_description_s **descriptions;
} description;
/* See above warning before adding fields */
} MMDB_metadata_s;
typedef struct MMDB_ipv4_start_node_s {
uint16_t netmask;
uint32_t node_value;
/* See above warning before adding fields */
} MMDB_ipv4_start_node_s;
typedef struct MMDB_s {
uint32_t flags;
const char *filename;
ssize_t file_size;
const uint8_t *file_content;
const uint8_t *data_section;
uint32_t data_section_size;
const uint8_t *metadata_section;
uint32_t metadata_section_size;
uint16_t full_record_byte_size;
uint16_t depth;
MMDB_ipv4_start_node_s ipv4_start_node;
MMDB_metadata_s metadata;
/* See above warning before adding fields */
} MMDB_s;
extern int MMDB_open(const char *const filename, uint32_t flags,
MMDB_s *const mmdb);
extern void MMDB_close(MMDB_s *const mmdb);
extern MMDB_lookup_result_s MMDB_lookup_string(const MMDB_s *const mmdb,
const char *const ipstr,
int *const gai_error,
int *const mmdb_error);
extern const char *MMDB_strerror(int error_code);
extern int MMDB_get_entry_data_list(
MMDB_entry_s *start, MMDB_entry_data_list_s **const entry_data_list);
extern void MMDB_free_entry_data_list(
MMDB_entry_data_list_s *const entry_data_list);
extern int MMDB_get_value(MMDB_entry_s *const start,
MMDB_entry_data_s *const entry_data,
...);
]])
local lib = ffi.load("/opt/bunkerweb/deps/lib/libmaxminddb.so")
local consume_map, consume_array
local consume_value
consume_value = function(current)
if current == nil then
return nil, "expected value but go nothing"
end
local entry_data = current.entry_data
local _exp_0 = entry_data.type
if DATA_TYPES.MMDB_DATA_TYPE_MAP == _exp_0 then
return assert(consume_map(current))
elseif DATA_TYPES.MMDB_DATA_TYPE_ARRAY == _exp_0 then
return assert(consume_array(current))
elseif DATA_TYPES.MMDB_DATA_TYPE_UTF8_STRING == _exp_0 then
local value = ffi.string(entry_data.utf8_string, entry_data.data_size)
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_UINT32 == _exp_0 then
local value = entry_data.uint32
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_UINT16 == _exp_0 then
local value = entry_data.uint16
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_INT32 == _exp_0 then
local value = entry_data.int32
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_UINT64 == _exp_0 then
local value = entry_data.uint64
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_DOUBLE == _exp_0 then
local value = entry_data.double_value
return value, current.next
elseif DATA_TYPES.MMDB_DATA_TYPE_BOOLEAN == _exp_0 then
assert(entry_data.boolean ~= nil)
local value = entry_data.boolean
return value, current.next
else
error("unknown type: " .. tostring(DATA_TYPES[entry_data.type]))
return nil, current.next
end
end
consume_map = function(current)
local out = { }
local map = current.entry_data
local tuple_count = map.data_size
current = current.next
while tuple_count > 0 do
local key
key, current = assert(consume_value(current))
local value
value, current = consume_value(current)
out[key] = value
tuple_count = tuple_count - 1
end
return out, current
end
consume_array = function(current)
local out = { }
local array = current.entry_data
local length = array.data_size
current = current.next
while length > 0 do
local value
value, current = assert(consume_value(current))
table.insert(out, value)
length = length - 1
end
return out, current
end
local Mmdb
do
local _class_0
local _base_0 = {
load = function(self)
self.mmdb = ffi.new("MMDB_s")
local res = lib.MMDB_open(self.file_path, 0, self.mmdb)
if not (res == MMDB_SUCCESS) then
return nil, "failed to load db: " .. tostring(self.file_path)
end
ffi.gc(self.mmdb, (assert(lib.MMDB_close, "missing destructor")))
return true
end,
_lookup_string = function(self, ip)
assert(self.mmdb, "mmdb database is not loaded")
local gai_error = ffi.new("int[1]")
local mmdb_error = ffi.new("int[1]")
local res = lib.MMDB_lookup_string(self.mmdb, ip, gai_error, mmdb_error)
if not (gai_error[0] == MMDB_SUCCESS) then
return nil, "gai error: " .. tostring(ffi.string(lib.gai_strerror(gai_error[0])))
end
if not (mmdb_error[0] == MMDB_SUCCESS) then
return nil, "mmdb error: " .. tostring(ffi.string(lib.MMDB_strerror(mmdb_error[0])))
end
if not (res.found_entry) then
return nil, "failed to find entry"
end
return res
end,
lookup_value = function(self, ip, ...)
assert((...), "missing path")
local path = {
...
}
table.insert(path, 0)
local res, err = self:_lookup_string(ip)
if not (res) then
return nil, err
end
local entry_data = ffi.new("MMDB_entry_data_s")
local status = lib.MMDB_get_value(res.entry, entry_data, unpack(path))
if MMDB_SUCCESS ~= status then
return nil, "failed to find field by path"
end
if entry_data.has_data then
local _exp_0 = entry_data.type
if DATA_TYPES.MMDB_DATA_TYPE_MAP == _exp_0 or DATA_TYPES.MMDB_DATA_TYPE_ARRAY == _exp_0 then
return nil, "path holds object, not value"
end
local value = assert(consume_value({
entry_data = entry_data
}))
return value
else
return nil, "entry has no data"
end
end,
lookup = function(self, ip)
local res, err = self:_lookup_string(ip)
if not (res) then
return nil, err
end
local entry_data_list = ffi.new("MMDB_entry_data_list_s*[1]")
local status = lib.MMDB_get_entry_data_list(res.entry, entry_data_list)
if not (status == MMDB_SUCCESS) then
return nil, "failed to load data: " .. tostring(ffi.string(lib.MMDB_strerror(status)))
end
ffi.gc(entry_data_list[0], (assert(lib.MMDB_free_entry_data_list, "missing destructor")))
local current = entry_data_list[0]
local value = assert(consume_value(current))
return value
end
}
_base_0.__index = _base_0
_class_0 = setmetatable({
__init = function(self, file_path, opts)
self.file_path, self.opts = file_path, opts
end,
__base = _base_0,
__name = "Mmdb"
}, {
__index = _base_0,
__call = function(cls, ...)
local _self_0 = setmetatable({}, _base_0)
cls.__init(_self_0, ...)
return _self_0
end
})
_base_0.__class = _class_0
Mmdb = _class_0
end
local load_database
load_database = function(filename)
local mmdb = Mmdb(filename)
local success, err = mmdb:load()
if not (success) then
return nil, err
end
return mmdb
end
return {
Mmdb = Mmdb,
load_database = load_database,
VERSION = require("geoip.version")
}
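-- Usage sketch (the module name and database path are assumptions; adjust to
-- how this file is mapped in package.path):
--   local mmdb = require("geoip.mmdb")
--   local db = assert(mmdb.load_database("/opt/bunkerweb/cache/country.mmdb"))
--   print(db:lookup_value("8.8.8.8", "country", "iso_code"))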

View File

@ -1,5 +0,0 @@
#!/bin/sh
echo Content-type: text/plain
echo
env

View File

@ -1,193 +0,0 @@
from glob import glob
from json import loads
from logging import Logger
from re import search as re_search
from sys import path as sys_path
from traceback import format_exc
from typing import Union
sys_path.append("/opt/bunkerweb/utils")
class Configurator:
def __init__(
self,
settings: str,
core: Union[str, dict],
plugins: str,
variables: Union[str, dict],
logger: Logger,
):
self.__logger = logger
self.__settings = self.__load_settings(settings)
if isinstance(core, str):
self.__core = self.__load_plugins(core)
else:
self.__core = core
self.__plugins_settings = []
self.__plugins = self.__load_plugins(plugins, "plugins")
if isinstance(variables, str):
self.__variables = self.__load_variables(variables)
else:
self.__variables = variables
self.__multisite = (
"MULTISITE" in self.__variables and self.__variables["MULTISITE"] == "yes"
)
self.__servers = self.__map_servers()
def get_settings(self):
return self.__settings
def get_plugins_settings(self):
return self.__plugins_settings
def __map_servers(self):
if not self.__multisite or not "SERVER_NAME" in self.__variables:
return {}
servers = {}
for server_name in self.__variables["SERVER_NAME"].split(" "):
if not re_search(self.__settings["SERVER_NAME"]["regex"], server_name):
self.__logger.warning(
f"Ignoring server name {server_name} because regex is not valid",
)
continue
names = [server_name]
if f"{server_name}_SERVER_NAME" in self.__variables:
if not re_search(
self.__settings["SERVER_NAME"]["regex"],
self.__variables[f"{server_name}_SERVER_NAME"],
):
self.__logger.warning(
f"Ignoring {server_name}_SERVER_NAME because regex is not valid",
)
else:
names = self.__variables[f"{server_name}_SERVER_NAME"].split(" ")
servers[server_name] = names
return servers
def __load_settings(self, path):
with open(path) as f:
return loads(f.read())
def __load_plugins(self, path, type: str = "other"):
plugins = {}
files = glob(f"{path}/*/plugin.json")
for file in files:
try:
with open(file) as f:
data = loads(f.read())
if type == "plugins":
self.__plugins_settings.append(data)
plugins.update(data["settings"])
except:
self.__logger.error(
f"Exception while loading JSON from {file} : {format_exc()}",
)
return plugins
def __load_variables(self, path):
variables = {}
with open(path) as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if line.startswith("#") or line == "" or not "=" in line:
continue
var = line.split("=")[0]
value = line[len(var) + 1 :]
variables[var] = value
return variables
def get_config(self):
config = {}
# Extract default settings
default_settings = [self.__settings, self.__core, self.__plugins]
for settings in default_settings:
for setting, data in settings.items():
config[setting] = data["default"]
# Override with variables
for variable, value in self.__variables.items():
ret, err = self.__check_var(variable)
if ret:
config[variable] = value
elif not variable.startswith("PYTHON") and variable not in (
"GPG_KEY",
"LANG",
"PATH",
"NGINX_VERSION",
"NJS_VERSION",
"PKG_RELEASE",
"DOCKER_HOST",
):
self.__logger.warning(f"Ignoring variable {variable} : {err}")
# Expand variables to each sites if MULTISITE=yes and if not present
if config.get("MULTISITE", "no") == "yes":
for server_name in config["SERVER_NAME"].split(" "):
if server_name == "":
continue
for settings in default_settings:
for setting, data in settings.items():
if data["context"] == "global":
continue
key = f"{server_name}_{setting}"
if key not in config:
if setting == "SERVER_NAME":
config[key] = server_name
elif setting in config:
config[key] = config[setting]
return config
def __check_var(self, variable):
value = self.__variables[variable]
# MULTISITE=no
if not self.__multisite:
where, real_var = self.__find_var(variable)
if not where:
return False, f"variable name {variable} doesn't exist"
if not "regex" in where[real_var]:
return False, f"missing regex for variable {variable}"
if not re_search(where[real_var]["regex"], value):
return (
False,
f"value {value} doesn't match regex {where[real_var]['regex']}",
)
return True, "ok"
# MULTISITE=yes
prefixed, real_var = self.__var_is_prefixed(variable)
where, real_var = self.__find_var(real_var)
if not where:
return False, f"variable name {variable} doesn't exist"
if prefixed and where[real_var]["context"] != "multisite":
return False, f"context of {variable} isn't multisite"
if not re_search(where[real_var]["regex"], value):
return (
False,
f"value {value} doesn't match regex {where[real_var]['regex']}",
)
return True, "ok"
def __find_var(self, variable):
targets = [self.__settings, self.__core, self.__plugins]
for target in targets:
if variable in target:
return target, variable
for real_var, settings in target.items():
if "multiple" in settings and re_search(
f"^{real_var}_[0-9]+$", variable
):
return target, real_var
return False, variable
def __var_is_prefixed(self, variable):
for server in self.__servers:
if variable.startswith(f"{server}_"):
return True, variable.replace(f"{server}_", "", 1)
return False, variable
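# Usage sketch (paths mirror the defaults used by the generator; the logger
# helper lives in /opt/bunkerweb/utils):
#   from logger import setup_logger
#   configurator = Configurator(
#       "/opt/bunkerweb/settings.json",
#       "/opt/bunkerweb/core",
#       "/opt/bunkerweb/plugins",
#       "/etc/nginx/variables.env",
#       setup_logger("Configurator", "INFO"),
#   )
#   config = configurator.get_config()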

View File

@ -1,193 +0,0 @@
#!/usr/bin/python3
from argparse import ArgumentParser
from glob import glob
from json import loads
from os import R_OK, W_OK, X_OK, access, getenv, path, remove, unlink
from os.path import exists, isdir, isfile, islink
from shutil import rmtree
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from time import sleep
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
sys_path.append("/opt/bunkerweb/db")
from logger import setup_logger
from Database import Database
from Configurator import Configurator
from Templator import Templator
if __name__ == "__main__":
logger = setup_logger("Generator", getenv("LOG_LEVEL", "INFO"))
wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
try:
# Parse arguments
parser = ArgumentParser(description="BunkerWeb config generator")
parser.add_argument(
"--settings",
default="/opt/bunkerweb/settings.json",
type=str,
help="file containing the main settings",
)
parser.add_argument(
"--templates",
default="/opt/bunkerweb/confs",
type=str,
help="directory containing the main template files",
)
parser.add_argument(
"--core",
default="/opt/bunkerweb/core",
type=str,
help="directory containing the core plugins",
)
parser.add_argument(
"--plugins",
default="/opt/bunkerweb/plugins",
type=str,
help="directory containing the external plugins",
)
parser.add_argument(
"--output",
default="/etc/nginx",
type=str,
help="where to write the rendered files",
)
parser.add_argument(
"--target",
default="/etc/nginx",
type=str,
help="where nginx will search for configurations files",
)
parser.add_argument(
"--variables",
type=str,
help="path to the file containing environment variables",
)
args = parser.parse_args()
logger.info("Generator started ...")
logger.info(f"Settings : {args.settings}")
logger.info(f"Templates : {args.templates}")
logger.info(f"Core : {args.core}")
logger.info(f"Plugins : {args.plugins}")
logger.info(f"Output : {args.output}")
logger.info(f"Target : {args.target}")
logger.info(f"Variables : {args.variables}")
integration = "Linux"
if getenv("KUBERNETES_MODE", "no") == "yes":
integration = "Kubernetes"
elif getenv("SWARM_MODE", "no") == "yes":
integration = "Swarm"
elif getenv("AUTOCONF_MODE", "no") == "yes":
integration = "Autoconf"
elif exists("/opt/bunkerweb/INTEGRATION"):
with open("/opt/bunkerweb/INTEGRATION", "r") as f:
integration = f.read().strip()
if args.variables:
# Check existences and permissions
logger.info("Checking arguments ...")
files = [args.settings, args.variables]
paths_rx = [args.core, args.plugins, args.templates]
paths_rwx = [args.output]
for file in files:
if not path.exists(file):
logger.error(f"Missing file : {file}")
sys_exit(1)
if not access(file, R_OK):
logger.error(f"Can't read file : {file}")
sys_exit(1)
for _path in paths_rx + paths_rwx:
if not path.isdir(_path):
logger.error(f"Missing directory : {_path}")
sys_exit(1)
if not access(_path, R_OK | X_OK):
logger.error(
f"Missing RX rights on directory : {_path}",
)
sys_exit(1)
for _path in paths_rwx:
if not access(_path, W_OK):
logger.error(
f"Missing W rights on directory : {_path}",
)
sys_exit(1)
# Compute the config
logger.info("Computing config ...")
config = Configurator(
args.settings, args.core, args.plugins, args.variables, logger
)
config = config.get_config()
else:
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
config = db.get_config()
# Remove old files
logger.info("Removing old files ...")
files = glob(f"{args.output}/*")
for file in files:
if islink(file):
unlink(file)
elif isfile(file):
remove(file)
elif isdir(file):
rmtree(file, ignore_errors=False)
# Render the templates
logger.info("Rendering templates ...")
templator = Templator(
args.templates,
args.core,
args.plugins,
args.output,
args.target,
config,
)
templator.render()
if integration == "Linux":
retries = 0
while not exists("/opt/bunkerweb/tmp/nginx.pid"):
if retries == 5:
logger.error(
"BunkerWeb's nginx didn't start in time.",
)
sys_exit(1)
logger.warning(
"Waiting for BunkerWeb's nginx to start, retrying in 5 seconds ...",
)
retries += 1
sleep(5)
cmd = "/usr/sbin/nginx -s reload"
proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
if proc.returncode != 0:
logger.error("Error while reloading nginx")
sys_exit(1)
else:
logger.info("Successfully reloaded nginx")
except SystemExit as e:
sys_exit(e.code)
except:
logger.error(
f"Exception while executing generator : {format_exc()}",
)
sys_exit(1)
# We're done
logger.info("Generator successfully executed !")

View File

@ -1,381 +0,0 @@
#!/usr/bin/python3
from argparse import ArgumentParser
from glob import glob
from itertools import chain
from json import loads
from os import R_OK, W_OK, X_OK, access, environ, getenv, path
from os.path import exists
from re import compile as re_compile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
from typing import Any
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
sys_path.append("/opt/bunkerweb/db")
from docker import DockerClient
from kubernetes import client as kube_client
from logger import setup_logger
from Database import Database
from Configurator import Configurator
from API import API
custom_confs_rx = re_compile(
r"^([0-9a-z\.\-]*)_?CUSTOM_CONF_(HTTP|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$"
)
def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):
api_http_port = None
api_server_name = None
tmp_config = {}
custom_confs = []
apis = []
for var in (
instance.attrs["Config"]["Env"]
if _type == "Docker"
else instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
):
splitted = var.split("=", 1)
if custom_confs_rx.match(splitted[0]):
custom_confs.append(
{
"value": splitted[1],
"exploded": custom_confs_rx.search(splitted[0]).groups(),
}
)
else:
tmp_config[splitted[0]] = splitted[1]
if db is None and splitted[0] == "DATABASE_URI":
db = Database(
logger,
sqlalchemy_string=splitted[1],
)
elif splitted[0] == "API_HTTP_PORT":
api_http_port = splitted[1]
elif splitted[0] == "API_SERVER_NAME":
api_server_name = splitted[1]
apis.append(
API(
f"http://{instance.name}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
)
)
return tmp_config, custom_confs, apis, db
if __name__ == "__main__":
logger = setup_logger("Generator", getenv("LOG_LEVEL", "INFO"))
wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
try:
# Parse arguments
parser = ArgumentParser(description="BunkerWeb config saver")
parser.add_argument(
"--settings",
default="/opt/bunkerweb/settings.json",
type=str,
help="file containing the main settings",
)
parser.add_argument(
"--core",
default="/opt/bunkerweb/core",
type=str,
help="directory containing the core plugins",
)
parser.add_argument(
"--plugins",
default="/opt/bunkerweb/plugins",
type=str,
help="directory containing the external plugins",
)
parser.add_argument(
"--variables",
type=str,
help="path to the file containing environment variables",
)
parser.add_argument(
"--init",
action="store_true",
help="Only initialize the database",
)
args = parser.parse_args()
logger.info("Save config started ...")
logger.info(f"Settings : {args.settings}")
logger.info(f"Core : {args.core}")
logger.info(f"Plugins : {args.plugins}")
logger.info(f"Init : {args.init}")
integration = "Linux"
if getenv("KUBERNETES_MODE", "no") == "yes":
integration = "Kubernetes"
elif getenv("SWARM_MODE", "no") == "yes":
integration = "Swarm"
elif getenv("AUTOCONF_MODE", "no") == "yes":
integration = "Autoconf"
elif exists("/opt/bunkerweb/INTEGRATION"):
with open("/opt/bunkerweb/INTEGRATION", "r") as f:
integration = f.read().strip()
logger.info(f"Detected {integration} integration")
config_files = None
db = None
# Check existences and permissions
logger.info("Checking arguments ...")
files = [args.settings] + ([args.variables] if args.variables else [])
paths_rx = [args.core, args.plugins]
for file in files:
if not path.exists(file):
logger.error(f"Missing file : {file}")
sys_exit(1)
if not access(file, R_OK):
logger.error(f"Can't read file : {file}")
sys_exit(1)
for _path in paths_rx:
if not path.isdir(_path):
logger.error(f"Missing directory : {_path}")
sys_exit(1)
if not access(_path, R_OK | X_OK):
logger.error(
f"Missing RX rights on directory : {_path}",
)
sys_exit(1)
# Check core plugins orders
logger.info("Checking core plugins orders ...")
core_plugins = {}
files = glob(f"{args.core}/*/plugin.json")
for file in files:
try:
with open(file) as f:
core_plugin = loads(f.read())
if core_plugin["order"] not in core_plugins:
core_plugins[core_plugin["order"]] = []
core_plugins[core_plugin["order"]].append(core_plugin)
except:
logger.error(
f"Exception while loading JSON from {file} : {format_exc()}",
)
core_settings = {}
for order in core_plugins:
if len(core_plugins[order]) > 1 and order != 999:
logger.warning(
f"Multiple plugins have the same order ({order}) : {', '.join(plugin['id'] for plugin in core_plugins[order])}. Therefor, the execution order will be random.",
)
for plugin in core_plugins[order]:
core_settings.update(plugin["settings"])
if args.variables:
logger.info(f"Variables : {args.variables}")
# Compute the config
logger.info("Computing config ...")
config = Configurator(
args.settings, core_settings, args.plugins, args.variables, logger
)
config_files = config.get_config()
custom_confs = [
{"value": v, "exploded": custom_confs_rx.search(k).groups()}
for k, v in environ.items()
if custom_confs_rx.match(k)
]
elif integration == "Kubernetes":
corev1 = kube_client.CoreV1Api()
tmp_config = {}
apis = []
for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
if (
pod.metadata.annotations is not None
and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
):
api_http_port = None
api_server_name = None
for pod_env in pod.spec.containers[0].env:
tmp_config[pod_env.name] = pod_env.value
if db is None and pod_env.name == "DATABASE_URI":
db = Database(
logger,
sqlalchemy_string=pod_env.value,
)
elif pod_env.name == "API_HTTP_PORT":
api_http_port = pod_env.value
elif pod_env.name == "API_SERVER_NAME":
api_server_name = pod_env.value
apis.append(
API(
f"http://{pod.status.pod_ip}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
)
)
supported_config_types = [
"http",
"stream",
"server-http",
"server-stream",
"default-server-http",
"modsec",
"modsec-crs",
]
custom_confs = []
for configmap in corev1.list_config_map_for_all_namespaces(
watch=False
).items:
if (
configmap.metadata.annotations is None
or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations
):
continue
config_type = configmap.metadata.annotations["bunkerweb.io/CONFIG_TYPE"]
if config_type not in supported_config_types:
logger.warning(
f"Ignoring unsupported CONFIG_TYPE {config_type} for ConfigMap {configmap.metadata.name}",
)
continue
elif not configmap.data:
logger.warning(
f"Ignoring blank ConfigMap {configmap.metadata.name}",
)
continue
config_site = ""
if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations:
config_site = (
f"{configmap.metadata.annotations['bunkerweb.io/CONFIG_SITE']}/"
)
for config_name, config_data in configmap.data.items():
custom_confs.append(
{
"value": config_data,
"exploded": (config_site, config_type, config_name),
}
)
else:
docker_client = DockerClient(
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
)
tmp_config = {}
custom_confs = []
apis = []
for instance in (
docker_client.containers.list(filters={"label": "bunkerweb.INSTANCE"})
if integration == "Docker"
else docker_client.services.list(
filters={"label": "bunkerweb.INSTANCE"}
)
):
conf, cstm_confs, tmp_apis, tmp_db = get_instance_configs_and_apis(
instance, db, integration
)
tmp_config.update(conf)
custom_confs.extend(cstm_confs)
apis.extend(tmp_apis)
if db is None:
db = tmp_db
if db is None:
db = Database(logger)
# Compute the config
if config_files is None:
logger.info("Computing config ...")
config = Configurator(
args.settings, core_settings, args.plugins, tmp_config, logger
)
config_files = config.get_config()
if not db.is_initialized():
logger.info(
"Database not initialized, initializing ...",
)
ret, err = db.init_tables(
[
config.get_settings(),
list(chain.from_iterable(core_plugins.values())),
config.get_plugins_settings(),
]
)
# Initialize database tables
if err:
logger.error(
f"Exception while initializing database : {err}",
)
sys_exit(1)
elif ret is False:
logger.info(
"Database tables are already initialized, skipping creation ...",
)
else:
logger.info("Database tables initialized")
with open("/opt/bunkerweb/VERSION", "r") as f:
version = f.read().strip()
err = db.initialize_db(version=version, integration=integration)
if err:
logger.error(
f"Can't Initialize database : {err}",
)
sys_exit(1)
else:
logger.info("Database initialized")
else:
logger.info(
"Database is already initialized, skipping ...",
)
if args.init:
sys_exit(0)
err = db.save_config(config_files, "scheduler")
err1 = None
if not err:
err1 = db.save_custom_configs(custom_confs, "scheduler")
if err or err1:
logger.error(
f"Can't save config to database : {err or err1}",
)
sys_exit(1)
else:
logger.info("Config successfully saved to database")
except SystemExit as e:
sys_exit(e.code)
except:
logger.error(
f"Exception while executing config saver : {format_exc()}",
)
sys_exit(1)
# We're done
logger.info("Config saver successfully executed !")

View File

@ -1,5 +0,0 @@
#!/bin/bash
/opt/bunkerweb/cli/main.py "$@"
exit $?

View File

@ -1,41 +0,0 @@
#!/bin/bash
. /opt/bunkerweb/helpers/utils.sh
log "$1" "" "Setup and check /data folder ..."
# Create folders if missing and check permissions
rwx_folders=("cache" "cache/letsencrypt")
rx_folders=("configs" "configs/http" "configs/stream" "configs/server-http" "configs/server-stream" "configs/default-server-http" "configs/default-server-stream" "configs/modsec" "configs/modsec-crs" "plugins" "www")
for folder in "${rwx_folders[@]}" ; do
if [ ! -d "/data/${folder}" ] ; then
mkdir -p "/data/${folder}"
if [ $? -ne 0 ] ; then
log "$1" "❌" "Wrong permissions on /data (RWX needed for user nginx with uid 101 and gid 101)"
exit 1
fi
elif [ ! -r "/data/${folder}" ] || [ ! -w "/data/${folder}" ] || [ ! -x "/data/${folder}" ] ; then
log "$1" "❌" "Wrong permissions on /data/${folder} (RWX needed for user nginx with uid 101 and gid 101)"
exit 1
fi
done
for folder in "${rx_folders[@]}" ; do
if [ ! -d "/data/${folder}" ] ; then
mkdir -p "/data/${folder}"
if [ $? -ne 0 ] ; then
log "$1" "❌" "Wrong permissions on /data (RWX needed for user nginx with uid 101 and gid 101)"
exit 1
fi
elif [ ! -r "/data/${folder}" ] || [ ! -x "/data/${folder}" ] ; then
log "$1" "❌" "Wrong permissions on /data/${folder} (RX needed for user nginx with uid 101 and gid 101)"
exit 1
fi
done
# Check permissions on files
IFS=$'\n'
for file in $(find /data -type f) ; do
if [ ! -r "${file}" ] ; then
log "$1" "❌" "Wrong permissions on ${file} (at least R needed for user nginx with uid 101 and gid 101)"
exit 1
fi
done
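# Usage (as called from the entrypoint): the single argument is the log prefix:
#   /opt/bunkerweb/helpers/data.sh "ENTRYPOINT"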

View File

@ -1,63 +0,0 @@
#!/bin/bash
. /opt/bunkerweb/helpers/utils.sh
log "ENTRYPOINT" "" "Starting BunkerWeb v$(cat /opt/bunkerweb/VERSION) ..."
# setup and check /data folder
/opt/bunkerweb/helpers/data.sh "ENTRYPOINT"
# trap SIGTERM and SIGINT
function trap_exit() {
log "ENTRYPOINT" "" "Catched stop operation"
log "ENTRYPOINT" "" "Stopping nginx ..."
/usr/sbin/nginx -s stop
}
trap "trap_exit" TERM INT QUIT
# trap SIGHUP
function trap_reload() {
log "ENTRYPOINT" "" "Catched reload operation"
if [ -f /opt/bunkerweb/tmp/nginx.pid ] ; then
log "ENTRYPOINT" "" "Reloading nginx ..."
nginx -s reload
if [ $? -eq 0 ] ; then
log "ENTRYPOINT" "" "Reload successful"
else
log "ENTRYPOINT" "❌" "Reload failed"
fi
else
log "ENTRYPOINT" "⚠️" "Ignored reload operation because nginx is not running"
fi
}
trap "trap_reload" HUP
if [ "$SWARM_MODE" == "yes" ] ; then
echo "Swarm" > /opt/bunkerweb/INTEGRATION
elif [ "$KUBERNETES_MODE" == "yes" ] ; then
echo "Kubernetes" > /opt/bunkerweb/INTEGRATION
elif [ "$AUTOCONF_MODE" == "yes" ] ; then
echo "Autoconf" > /opt/bunkerweb/INTEGRATION
fi
if [ -f "/etc/nginx/variables.env" ] ; then
log "ENTRYPOINT" "⚠️ " "Looks like BunkerWeb has already been loaded, will not generate temp config"
else
# generate "temp" config
echo -e "IS_LOADING=yes\nSERVER_NAME=\nAPI_HTTP_PORT=${API_HTTP_PORT:-5000}\nAPI_SERVER_NAME=${API_SERVER_NAME:-bwapi}\nAPI_WHITELIST_IP=${API_WHITELIST_IP:-127.0.0.0/8}" > /tmp/variables.env
python3 /opt/bunkerweb/gen/main.py --variables /tmp/variables.env
fi
# start nginx
log "ENTRYPOINT" "" "Starting nginx ..."
nginx -g "daemon off;" &
pid="$!"
# wait while nginx is running
wait "$pid"
while [ -f "/opt/bunkerweb/tmp/nginx.pid" ] ; do
wait "$pid"
done
log "ENTRYPOINT" "" "BunkerWeb stopped"
exit 0

View File

@ -1,16 +0,0 @@
#!/bin/bash
if [ -f /opt/bunkerweb/tmp/nginx-temp.pid ] ; then
exit 1
fi
if [ ! -f /opt/bunkerweb/tmp/nginx.pid ] ; then
exit 1
fi
check="$(curl -s -H "Host: healthcheck.bunkerweb.io" http://127.0.0.1:6000/healthz 2>&1)"
if [ $? -ne 0 ] || [ "$check" != "ok" ] ; then
exit 1
fi
exit 0
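# Typical wiring (sketch; not taken from this repo's Dockerfile):
#   HEALTHCHECK --interval=10s --timeout=5s CMD /opt/bunkerweb/helpers/healthcheck.sh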

View File

@ -1,649 +0,0 @@
#!/bin/bash
NGINX_VERSION="${NGINX_VERSION-1.20.2}"
BUILD_MODE="${BUILD_MODE-prod}"
function git_secure_checkout() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
path="$1"
commit="$2"
cd "$path"
output="$(git checkout "${commit}^{commit}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Commit hash $commit is absent from submodules $path !"
echo "$output"
cleanup
exit 4
fi
}
function git_secure_clone() {
cd /tmp/bunkerweb
repo="$1"
commit="$2"
folder="$(echo "$repo" | sed -E "s@https://github.com/.*/(.*)\.git@\1@")"
output="$(git clone "$repo" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Error cloning $1"
echo "$output"
cleanup
exit 2
fi
cd "$folder"
output="$(git checkout "${commit}^{commit}" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Commit hash $commit is absent from repository $repo"
echo "$output"
cleanup
exit 3
fi
}
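# Usage sketch (mirrors the call made further down in this script):
#   git_secure_clone "https://github.com/bunkerity/bunkerweb.git" "<commit-hash>"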
function secure_download() {
cd /tmp/bunkerweb
link="$1"
file="$2"
hash="$3"
output="$(wget -q -O "$file" "$link" 2>&1)"
if [ $? -ne 0 ] ; then
echo "❌ Error downloading $link"
echo "$output"
cleanup
exit 5
fi
check="$(sha512sum "$file" | cut -d ' ' -f 1)"
if [ "$check" != "$hash" ] ; then
echo "❌️ Wrong hash from file $link (expected $hash got $check)"
cleanup
exit 6
fi
}
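# Usage sketch (hypothetical URL and digest): download a file and verify its
# sha512 checksum before using it:
#   secure_download "https://example.com/pkg.tar.gz" "pkg.tar.gz" "<sha512-hex>"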
function do_and_check_cmd() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
output=$("$@" 2>&1)
ret="$?"
if [ $ret -ne 0 ] ; then
echo "❌ Error from command : $*"
echo "$output"
cleanup
exit $ret
fi
#echo $output
return 0
}
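# Usage sketch: run a command and abort the install on failure; CHANGE_DIR
# optionally selects the working directory first (as used throughout below):
#   CHANGE_DIR="/tmp/bunkerweb" do_and_check_cmd make -j "$NTASK"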
function cleanup() {
echo " Cleaning /tmp/bunkerweb"
rm -rf /tmp/bunkerweb
}
function get_sign_repo_key() {
key="-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v2.0.22 (GNU/Linux)
mQENBE5OMmIBCAD+FPYKGriGGf7NqwKfWC83cBV01gabgVWQmZbMcFzeW+hMsgxH
W6iimD0RsfZ9oEbfJCPG0CRSZ7ppq5pKamYs2+EJ8Q2ysOFHHwpGrA2C8zyNAs4I
QxnZZIbETgcSwFtDun0XiqPwPZgyuXVm9PAbLZRbfBzm8wR/3SWygqZBBLdQk5TE
fDR+Eny/M1RVR4xClECONF9UBB2ejFdI1LD45APbP2hsN/piFByU1t7yK2gpFyRt
97WzGHn9MV5/TL7AmRPM4pcr3JacmtCnxXeCZ8nLqedoSuHFuhwyDnlAbu8I16O5
XRrfzhrHRJFM1JnIiGmzZi6zBvH0ItfyX6ttABEBAAG0KW5naW54IHNpZ25pbmcg
a2V5IDxzaWduaW5nLWtleUBuZ2lueC5jb20+iQE+BBMBAgAoAhsDBgsJCAcDAgYV
CAIJCgsEFgIDAQIeAQIXgAUCV2K1+AUJGB4fQQAKCRCr9b2Ce9m/YloaB/9XGrol
kocm7l/tsVjaBQCteXKuwsm4XhCuAQ6YAwA1L1UheGOG/aa2xJvrXE8X32tgcTjr
KoYoXWcdxaFjlXGTt6jV85qRguUzvMOxxSEM2Dn115etN9piPl0Zz+4rkx8+2vJG
F+eMlruPXg/zd88NvyLq5gGHEsFRBMVufYmHtNfcp4okC1klWiRIRSdp4QY1wdrN
1O+/oCTl8Bzy6hcHjLIq3aoumcLxMjtBoclc/5OTioLDwSDfVx7rWyfRhcBzVbwD
oe/PD08AoAA6fxXvWjSxy+dGhEaXoTHjkCbz/l6NxrK3JFyauDgU4K4MytsZ1HDi
MgMW8hZXxszoICTTiQEcBBABAgAGBQJOTkelAAoJEKZP1bF62zmo79oH/1XDb29S
YtWp+MTJTPFEwlWRiyRuDXy3wBd/BpwBRIWfWzMs1gnCjNjk0EVBVGa2grvy9Jtx
JKMd6l/PWXVucSt+U/+GO8rBkw14SdhqxaS2l14v6gyMeUrSbY3XfToGfwHC4sa/
Thn8X4jFaQ2XN5dAIzJGU1s5JA0tjEzUwCnmrKmyMlXZaoQVrmORGjCuH0I0aAFk
RS0UtnB9HPpxhGVbs24xXZQnZDNbUQeulFxS4uP3OLDBAeCHl+v4t/uotIad8v6J
SO93vc1evIje6lguE81HHmJn9noxPItvOvSMb2yPsE8mH4cJHRTFNSEhPW6ghmlf
Wa9ZwiVX5igxcvaIRgQQEQIABgUCTk5b0gAKCRDs8OkLLBcgg1G+AKCnacLb/+W6
cflirUIExgZdUJqoogCeNPVwXiHEIVqithAM1pdY/gcaQZmIRgQQEQIABgUCTk5f
YQAKCRCpN2E5pSTFPnNWAJ9gUozyiS+9jf2rJvqmJSeWuCgVRwCcCUFhXRCpQO2Y
Va3l3WuB+rgKjsQ=
=EWWI
-----END PGP PUBLIC KEY BLOCK-----"
echo "$key"
}
function get_sign_repo_key_rsa() {
key="-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/hT2Chq4hhn+zasCn1gv
N3AVdNYGm4FVkJmWzHBc3lvoTLIMR1uoopg9EbH2faBG3yQjxtAkUme6aauaSmpm
LNvhCfENsrDhRx8KRqwNgvM8jQLOCEMZ2WSGxE4HEsBbQ7p9F4qj8D2YMrl1ZvTw
Gy2UW3wc5vMEf90lsoKmQQS3UJOUxHw0fhJ8vzNUVUeMQpRAjjRfVAQdnoxXSNSw
+OQD2z9obDf6YhQclNbe8itoKRckbfe1sxh5/TFef0y+wJkTzOKXK9yWnJrQp8V3
gmfJy6nnaErhxbocMg55QG7vCNejuV0a384ax0SRTNSZyIhps2Yuswbx9CLX8l+r
bQIDAQAB
-----END PUBLIC KEY-----"
echo "$key"
}
# Variables
NTASK=$(nproc)
# Check if we are root
if [ $(id -u) -ne 0 ] ; then
echo "❌ Run me as root"
exit 1
fi
# Detect OS
OS=""
if [ "$(grep Debian /etc/os-release)" != "" ] ; then
OS="debian"
elif [ "$(grep Ubuntu /etc/os-release)" != "" ] ; then
OS="ubuntu"
elif [ "$(grep CentOS /etc/os-release)" != "" ] ; then
OS="centos"
elif [ "$(grep Fedora /etc/os-release)" != "" ] ; then
OS="fedora"
elif [ "$(grep Arch /etc/os-release)" != "" ] ; then
OS="archlinux"
elif [ "$(grep Alpine /etc/os-release)" != "" ] ; then
OS="alpine"
fi
if [ "$OS" = "" ] ; then
echo "❌ Unsupported Operating System"
exit 1
fi
old_dir="${PWD}"
# Create /tmp/bunkerweb
if [ -e "/tmp/bunkerweb" ] ; then
echo " Remove existing /tmp/bunkerweb"
do_and_check_cmd rm -rf /tmp/bunkerweb
fi
# Create /opt/bunkerweb
if [ -d "/opt/bunkerweb" ] ; then
echo "❌️ Looks like bunkerweb is already installed. Updating is not supported yet, you need to uninstall first and then install it again."
exit 1
fi
echo " Create /opt/bunkerweb"
do_and_check_cmd mkdir /opt/bunkerweb
# Check nginx version
NGINX_CHECK_VERSION="$(nginx -V 2>&1 | sed -rn 's~^nginx version: nginx/(.*)$~\1~p')"
# Add nginx official repo and install
if [ "$NGINX_CHECK_VERSION" = "" ] ; then
do_and_check_cmd mkdir -p /tmp/bunkerweb
get_sign_repo_key > /tmp/bunkerweb/nginx_signing.key
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
echo " Add nginx official repository"
do_and_check_cmd cp /tmp/bunkerweb/nginx_signing.key /etc/apt/trusted.gpg.d/nginx_signing.asc
do_and_check_cmd apt update
DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y gnupg2 ca-certificates lsb-release software-properties-common
do_and_check_cmd add-apt-repository "deb http://nginx.org/packages/${OS} $(lsb_release -cs) nginx"
do_and_check_cmd apt update
echo " Install nginx"
DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y "nginx=$NGINX_VERSION"
elif [ "$OS" = "centos" ] ; then
echo " Add nginx official repository"
do_and_check_cmd yum install -y yum-utils
do_and_check_cmd cp /tmp/bunkerweb/nginx_signing.key /etc/pki/rpm-gpg/RPM-GPG-KEY-nginx
do_and_check_cmd rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-nginx
repo="[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/\$releasever/\$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-nginx
enabled=1
module_hotfixes=true"
echo "$repo" > /tmp/bunkerweb/nginx.repo
do_and_check_cmd cp /tmp/bunkerweb/nginx.repo /etc/yum.repos.d/nginx.repo
echo " Install nginx"
do_and_check_cmd yum install -y "nginx-$NGINX_VERSION"
elif [ "$OS" = "fedora" ] ; then
echo " Install nginx"
do_and_check_cmd dnf install -y "nginx-$NGINX_VERSION"
elif [ "$OS" = "archlinux" ] ; then
echo " Update pacman DB"
do_and_check_cmd pacman -Sy
echo " Install nginx"
do_and_check_cmd pacman -S --noconfirm "nginx=$NGINX_VERSION"
elif [ "$OS" = "alpine" ] ; then
echo " Add nginx official repository"
get_sign_repo_key_rsa > /tmp/bunkerweb/nginx_signing.rsa.pub
do_and_check_cmd cp /tmp/bunkerweb/nginx_signing.rsa.pub /etc/apk/keys/nginx_signing.rsa.pub
echo "@nginx http://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" >> /etc/apk/repositories
echo " Install nginx"
do_and_check_cmd apk add "nginx@nginx=$NGINX_VERSION"
fi
NGINX_CHECK_VERSION="$(nginx -V 2>&1 | sed -rn 's~^nginx version: nginx/(.*)$~\1~p')"
fi
echo " Detected nginx version ${NGINX_CHECK_VERSION}"
if [ "$NGINX_CHECK_VERSION" != "$NGINX_VERSION" ] ; then
echo "⚠️ Detected nginx version ${NGINX_CHECK_VERSION} but the official nginx version supported is ${NGINX_VERSION}. We recommend you to uninstall nginx and run the installation script again."
read -p "Abort installation of BunkerWeb (Y/n) ? " -n 1 -r
echo
if [ "$REPLY" = "Y" ] || [ "$REPLY" = "y"] || [ "$REPLY" = "" ] ; then
cleanup
exit 1
fi
NGINX_VERSION="$NGINX_CHECK_VERSION"
fi
# Stop nginx on Linux
if [ "$OS" != "alpine" ] ; then
systemctl status nginx > /dev/null 2>&1
if [ $? -eq 0 ] ; then
echo " Stop nginx service"
do_and_check_cmd systemctl stop nginx
fi
fi
# Install dependencies
echo " Update packet list"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
do_and_check_cmd apt update
elif [ "$OS" = "archlinux" ] ; then
do_and_check_cmd pacman -Sy
fi
echo " Install compilation and runtime dependencies"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
DEBIAN_DEPS="git autoconf pkg-config libpcre++-dev automake libtool g++ make libgd-dev libssl-dev wget libbrotli-dev gnupg patch libreadline-dev certbot python3 python3-pip procps sudo"
DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y $DEBIAN_DEPS
elif [ "$OS" = "centos" ] ; then
do_and_check_cmd yum install -y epel-release
CENTOS_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg patch readline-devel ca-certificates certbot python3 python3-pip procps sudo"
do_and_check_cmd yum install -y $CENTOS_DEPS
elif [ "$OS" = "fedora" ] ; then
FEDORA_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg libxslt-devel perl-ExtUtils-Embed gperftools-devel patch readline-devel certbot python3 python3-pip procps nginx-mod-stream sudo"
do_and_check_cmd dnf install -y $FEDORA_DEPS
elif [ "$OS" = "archlinux" ] ; then
ARCHLINUX_DEPS="git autoconf pkgconf pcre2 automake libtool gcc make gd openssl wget brotli gnupg libxslt patch readline certbot python python-pip procps sudo"
do_and_check_cmd pacman -S --noconfirm $ARCHLINUX_DEPS
elif [ "$OS" = "alpine" ] ; then
ALPINE_DEPS_COMPILE="git build autoconf libtool automake git geoip-dev yajl-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev"
do_and_check_cmd apk add --no-cache --virtual build $ALPINE_DEPS_COMPILE
ALPINE_DEPS_RUNTIME="certbot bash libgcc yajl libstdc++ openssl py3-pip git"
do_and_check_cmd apk add --no-cache $ALPINE_DEPS_RUNTIME
fi
# Clone the repo
if [ ! -d "/tmp/bunkerweb-data" ] ls; then
echo " Clone bunkerity/bunkerweb"
if [ "$BUILD_MODE" = "prod" ] ; then
CHANGE_DIR="/tmp" do_and_check_cmd git_secure_clone https://github.com/bunkerity/bunkerweb.git 3d2f5e2389e5f75131ae22f822a673b92cb12cca
else
CHANGE_DIR="/tmp" do_and_check_cmd git clone https://github.com/bunkerity/bunkerweb.git
CHANGE_DIR="/tmp/bunkerweb" do_and_check_cmd git checkout dev
fi
# Or rename the folder
else
echo " Move /tmp/bunkerweb-data to /tmp/bunkerweb"
do_and_check_cmd mv /tmp/bunkerweb-data /tmp/bunkerweb
fi
# Create deps folder
echo " Create /opt/bunkerweb/deps"
do_and_check_cmd mkdir /opt/bunkerweb/deps
# Compile and install lua
echo " Compile and install lua-5.1.5"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-5.1.5" do_and_check_cmd make -j $NTASK linux
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-5.1.5" do_and_check_cmd make INSTALL_TOP=/opt/bunkerweb/deps install
# Download, compile and install libmaxminddb
echo " Compile and install libmaxminddb"
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd ./bootstrap
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd ./configure --prefix=/opt/bunkerweb/deps --disable-tests
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/libmaxminddb" do_and_check_cmd make install
# Download, compile and install ModSecurity
echo " Compile and install ModSecurity"
# temp fix : on Debian we need to run it twice
# TODO : patch it in clone.sh
cd /tmp/bunkerweb/deps/src/ModSecurity && ./build.sh > /dev/null 2>&1
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd sh build.sh
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd ./configure --disable-doxygen-doc --disable-dependency-tracking --disable-examples --prefix=/opt/bunkerweb/deps --with-maxmind=/opt/bunkerweb/deps
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/ModSecurity" do_and_check_cmd make install-strip
# Compile and install luajit2
echo " Compile and install luajit2"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luajit2" do_and_check_cmd make -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luajit2" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-core
echo " Install openresty/lua-resty-core"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-core" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-lrucache
echo " Install lua-resty-lrucache"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-lrucache" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-dns
echo " Install lua-resty-dns"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-dns" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-session
echo " Install lua-resty-session"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/lua-resty-session/lib/resty/* /opt/bunkerweb/deps/lib/lua/resty
# Install lua-resty-random
echo " Install lua-resty-random"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-random" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Install lua-resty-string
echo " Install lua-resty-string"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-string" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Compile and install lua-cjson
echo " Compile and install lua-cjson"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make LUA_INCLUDE_DIR=/opt/bunkerweb/deps/include -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_CMODULE_DIR=/opt/bunkerweb/deps/lib/lua LUA_MODULE_DIR=/opt/bunkerweb/deps/lib/lua install
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-cjson" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_CMODULE_DIR=/opt/bunkerweb/deps/lib/lua LUA_MODULE_DIR=/opt/bunkerweb/deps/lib/lua install-extra
# Compile and install lua-gd
echo " Compile and install lua-gd"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-gd" do_and_check_cmd make "CFLAGS=-O3 -Wall -fPIC -fomit-frame-pointer -I/opt/bunkerweb/deps/include -DVERSION=\\\"2.0.33r3\\\"" "LFLAGS=-shared -L/opt/bunkerweb/deps/lib -llua -lgd -Wl,-rpath=/opt/bunkerweb/deps/lib" LUABIN=/opt/bunkerweb/deps/bin/lua -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-gd" do_and_check_cmd make INSTALL_PATH=/opt/bunkerweb/deps/lib/lua install
# Download and install lua-resty-http
echo " Install lua-resty-http"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-http" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps install
# Download and install lualogging
echo " Install lualogging"
do_and_check_cmd cp -r /tmp/bunkerweb/deps/src/lualogging/src/* /opt/bunkerweb/deps/lib/lua
# Compile and install luasocket
echo " Compile and install luasocket"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasocket" do_and_check_cmd make LUAINC_linux=/opt/bunkerweb/deps/include -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasocket" do_and_check_cmd make prefix=/opt/bunkerweb/deps CDIR_linux=lib/lua LDIR_linux=lib/lua install
# Compile and install luasec
echo " Compile and install luasec"
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasec" do_and_check_cmd make INC_PATH=-I/opt/bunkerweb/deps/include linux -j $NTASK
CHANGE_DIR="/tmp/bunkerweb/deps/src/luasec" do_and_check_cmd make LUACPATH=/opt/bunkerweb/deps/lib/lua LUAPATH=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-iputils
echo " Install lua-resty-iputils"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-iputils" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_LIB_DIR=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-redis
echo " Install lua-resty-redis"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-redis" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_LIB_DIR=/opt/bunkerweb/deps/lib/lua install
# Install lua-resty-upload
echo " Install lua-resty-upload"
CHANGE_DIR="/tmp/bunkerweb/deps/src/lua-resty-upload" do_and_check_cmd make PREFIX=/opt/bunkerweb/deps LUA_LIB_DIR=/opt/bunkerweb/deps/lib/lua install
# Compile dynamic modules
echo " Compile and install dynamic modules"
CONFARGS="$(nginx -V 2>&1 | sed -n -e 's/^.*arguments: //p')"
CONFARGS="${CONFARGS/-Os -fomit-frame-pointer -g/-Os}"
if [ "$OS" = "fedora" ] ; then
CONFARGS="$(echo -n "$CONFARGS" | sed "s/--with-ld-opt='.*'//" | sed "s/--with-cc-opt='.*'//")"
fi
echo "\#!/bin/bash" > "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
echo "./configure $CONFARGS --add-dynamic-module=/tmp/bunkerweb/deps/src/ModSecurity-nginx --add-dynamic-module=/tmp/bunkerweb/deps/src/headers-more-nginx-module --add-dynamic-module=/tmp/bunkerweb/deps/src/ngx_http_geoip2_module --add-dynamic-module=/tmp/bunkerweb/deps/src/nginx_cookie_flag_module --add-dynamic-module=/tmp/bunkerweb/deps/src/lua-nginx-module --add-dynamic-module=/tmp/bunkerweb/deps/src/ngx_brotli" >> "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
do_and_check_cmd chmod +x "/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}/configure-fix.sh"
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" LUAJIT_LIB="/opt/bunkerweb/deps/lib -Wl,-rpath,/opt/bunkerweb/deps/lib" LUAJIT_INC="/opt/bunkerweb/deps/include/luajit-2.1" MODSECURITY_LIB="/opt/bunkerweb/deps/lib" MODSECURITY_INC="/opt/bunkerweb/deps/include" do_and_check_cmd ./configure-fix.sh
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" do_and_check_cmd make -j $NTASK modules
do_and_check_cmd mkdir /opt/bunkerweb/modules
do_and_check_cmd chown root:nginx /opt/bunkerweb/modules
do_and_check_cmd chmod 750 /opt/bunkerweb/modules
CHANGE_DIR="/tmp/bunkerweb/deps/src/nginx-${NGINX_VERSION}" do_and_check_cmd cp ./objs/*.so /opt/bunkerweb/modules
do_and_check_cmd chmod 740 /opt/bunkerweb/modules/*.so
# TODO : temp fix for fedora
if [ "$OS" = "fedora" ] ; then
cp /usr/lib64/nginx/modules/ngx_stream_module.so /opt/bunkerweb/modules/ngx_stream_module.so
fi
# Dependencies are installed
echo " Dependencies for bunkerweb successfully compiled and installed !"
# Remove build dependencies in container
if [ "$OS" = "alpine" ] ; then
echo " Remove build dependencies"
do_and_check_cmd apk del build
fi
# Install Python dependencies
echo " Install python dependencies"
do_and_check_cmd pip3 install --upgrade pip
do_and_check_cmd pip3 install -r /tmp/bunkerweb/gen/requirements.txt
do_and_check_cmd pip3 install -r /tmp/bunkerweb/job/requirements.txt
if [ "$OS" != "alpine" ] ; then
do_and_check_cmd pip3 install -r /tmp/bunkerweb/ui/requirements.txt
fi
do_and_check_cmd pip3 install cryptography --upgrade
# Copy generator
echo " Copy generator"
do_and_check_cmd cp -r /tmp/bunkerweb/gen /opt/bunkerweb
# Copy configs
echo " Copy configs"
do_and_check_cmd cp -r /tmp/bunkerweb/confs /opt/bunkerweb
# Copy LUA
echo " Copy lua"
do_and_check_cmd cp -r /tmp/bunkerweb/lua /opt/bunkerweb
# Copy misc
echo " Copy misc"
do_and_check_cmd cp -r /tmp/bunkerweb/misc /opt/bunkerweb
# Copy core
echo " Copy core"
do_and_check_cmd cp -r /tmp/bunkerweb/core /opt/bunkerweb
# Copy job
echo " Copy job"
do_and_check_cmd cp -r /tmp/bunkerweb/job /opt/bunkerweb
# Copy cli
echo " Copy cli"
do_and_check_cmd cp -r /tmp/bunkerweb/cli /opt/bunkerweb
# Copy utils
echo " Copy utils"
do_and_check_cmd cp -r /tmp/bunkerweb/utils /opt/bunkerweb
# Copy helpers
echo " Copy helpers"
do_and_check_cmd cp -r /tmp/bunkerweb/helpers /opt/bunkerweb
# Copy UI
if [ "$OS" != "alpine" ] ; then
echo " Copy UI"
do_and_check_cmd cp -r /tmp/bunkerweb/ui /opt/bunkerweb
do_and_check_cmd cp /tmp/bunkerweb/ui/bunkerweb-ui.service /lib/systemd/system
fi
# Copy settings
echo " Copy settings"
do_and_check_cmd cp /tmp/bunkerweb/settings.json /opt/bunkerweb
# Copy bwcli
echo " Copy bwcli"
do_and_check_cmd cp /tmp/bunkerweb/helpers/bwcli /usr/local/bin
# Copy VERSION
echo " Copy VERSION"
do_and_check_cmd cp /tmp/bunkerweb/VERSION /opt/bunkerweb
# Replace old nginx.service file
if [ "$OS" != "alpine" ] ; then
do_and_check_cmd mv /lib/systemd/system/nginx.service /lib/systemd/system/nginx.service.bak
do_and_check_cmd cp /tmp/bunkerweb/misc/nginx.service /lib/systemd/system/
fi
# Create nginx user
if [ "$(grep "nginx:" /etc/passwd)" = "" ] ; then
echo " Add nginx user"
do_and_check_cmd useradd -d /opt/bunkerweb -s /usr/sbin/nologin nginx
fi
# Create www folder
if [ ! -d "/opt/bunkerweb/www" ] ; then
echo " Create /opt/bunkerweb/www folder"
do_and_check_cmd mkdir /opt/bunkerweb/www
fi
# Create http-confs folder
if [ ! -d "/opt/bunkerweb/http-confs" ] ; then
echo " Create /opt/bunkerweb/http-confs folder"
do_and_check_cmd mkdir /opt/bunkerweb/http-confs
fi
# Create stream-confs folder
if [ ! -d "/opt/bunkerweb/stream-confs" ] ; then
echo " Create /opt/bunkerweb/stream-confs folder"
do_and_check_cmd mkdir /opt/bunkerweb/stream-confs
fi
# Create server-confs folder
if [ ! -d "/opt/bunkerweb/server-confs" ] ; then
echo " Create /opt/bunkerweb/server-confs folder"
do_and_check_cmd mkdir /opt/bunkerweb/server-confs
fi
# Create modsec-confs folder
if [ ! -d "/opt/bunkerweb/modsec-confs" ] ; then
echo " Create /opt/bunkerweb/modsec-confs folder"
do_and_check_cmd mkdir /opt/bunkerweb/modsec-confs
fi
# Create modsec-crs-confs folder
if [ ! -d "/opt/bunkerweb/modsec-crs-confs" ] ; then
echo " Create /opt/bunkerweb/modsec-crs-confs folder"
do_and_check_cmd mkdir /opt/bunkerweb/modsec-crs-confs
fi
# Create cache folder
if [ ! -d "/opt/bunkerweb/cache" ] ; then
echo " Create /opt/bunkerweb/cache folder"
do_and_check_cmd mkdir /opt/bunkerweb/cache
fi
# Create tmp folder
if [ ! -d "/opt/bunkerweb/tmp" ] ; then
echo " Create /opt/bunkerweb/tmp folder"
do_and_check_cmd mkdir -p /opt/bunkerweb/tmp
fi
# Create plugins folder
if [ ! -d "/opt/bunkerweb/plugins" ] ; then
echo " Create /opt/bunkerweb/plugins folder"
do_and_check_cmd mkdir /opt/bunkerweb/plugins
fi
# Set permissions for /opt/bunkerweb
echo " Set permissions on files and folders"
do_and_check_cmd chown -R root:nginx /opt/bunkerweb
do_and_check_cmd find /opt/bunkerweb -type f -exec chmod 0740 {} \;
do_and_check_cmd find /opt/bunkerweb -type d -exec chmod 0750 {} \;
do_and_check_cmd chmod 770 /opt/bunkerweb/cache
do_and_check_cmd chmod 770 /opt/bunkerweb/tmp
do_and_check_cmd chmod 750 /opt/bunkerweb/gen/main.py
do_and_check_cmd chmod 750 /opt/bunkerweb/job/main.py
do_and_check_cmd chmod 750 /opt/bunkerweb/cli/main.py
do_and_check_cmd chmod 750 /opt/bunkerweb/helpers/*.sh
# Set permissions for /usr/local/bin/bunkerweb
do_and_check_cmd chown root:nginx /usr/local/bin/bwcli
do_and_check_cmd chmod 750 /usr/local/bin/bwcli
# Set permissions for /opt
do_and_check_cmd chmod u+rx /opt
# Set permissions for /etc/nginx
do_and_check_cmd chown -R nginx:nginx /etc/nginx
do_and_check_cmd find /etc/nginx -type f -exec chmod 0774 {} \;
do_and_check_cmd find /etc/nginx -type d -exec chmod 0775 {} \;
# Set permissions for systemd files and reload config
if [ "$OS" != "alpine" ] ; then
do_and_check_cmd chown root:root /lib/systemd/system/bunkerweb-ui.service
do_and_check_cmd chmod 744 /lib/systemd/system/bunkerweb-ui.service
do_and_check_cmd chown root:root /lib/systemd/system/nginx.service
do_and_check_cmd chmod 744 /lib/systemd/system/nginx.service
do_and_check_cmd systemctl daemon-reload
fi
# Allow RX access to others on /opt/bunkerweb
do_and_check_cmd chmod 755 /opt/bunkerweb
# Allow nginx group to do nginx reload as root
if [ "$OS" != "alpine" ] ; then
do_and_check_cmd chown root:nginx /opt/bunkerweb/ui/linux.sh
do_and_check_cmd chmod 750 /opt/bunkerweb/ui/linux.sh
echo "nginx ALL=(root:root) NOPASSWD: /opt/bunkerweb/ui/linux.sh" >> /etc/sudoers
fi
# Prepare log files and folders
echo " Prepare log files and folders"
if [ ! -e "/var/log/nginx" ] ; then
do_and_check_cmd mkdir /var/log/nginx
fi
if [ ! -e "/var/log/nginx/access.log" ] ; then
do_and_check_cmd touch /var/log/nginx/access.log
fi
if [ ! -e "/var/log/nginx/error.log" ] ; then
do_and_check_cmd touch /var/log/nginx/error.log
fi
if [ ! -e "/var/log/nginx/modsec_audit.log" ] ; then
do_and_check_cmd touch /var/log/nginx/modsec_audit.log
fi
if [ ! -e "/var/log/nginx/jobs.log" ] ; then
do_and_check_cmd touch /var/log/nginx/jobs.log
fi
if [ ! -e "/var/log/nginx/ui.log" ] ; then
do_and_check_cmd touch /var/log/nginx/ui.log
fi
do_and_check_cmd chown -R root:nginx /var/log/nginx
do_and_check_cmd chmod -R 770 /var/log/nginx/
# Prepare Let's Encrypt files and folders
echo " Prepare Let's Encrypt files and folders"
if [ ! -e "/var/log/letsencrypt" ] ; then
do_and_check_cmd mkdir /var/log/letsencrypt
fi
do_and_check_cmd chown root:nginx /var/log/letsencrypt
do_and_check_cmd chmod 770 /var/log/letsencrypt
if [ ! -e "/etc/letsencrypt" ] ; then
do_and_check_cmd mkdir /etc/letsencrypt
fi
do_and_check_cmd chown root:nginx /etc/letsencrypt
do_and_check_cmd chmod 770 /etc/letsencrypt
if [ ! -e "/var/lib/letsencrypt" ] ; then
do_and_check_cmd mkdir /var/lib/letsencrypt
fi
do_and_check_cmd chown root:nginx /var/lib/letsencrypt
do_and_check_cmd chmod 770 /var/lib/letsencrypt
# Docker specific
if [ "$OS" = "alpine" ] ; then
echo " Preparing Docker image"
# prepare folders
folders="www http-confs server-confs stream-confs modsec-confs modsec-crs-confs cache plugins"
for folder in $folders ; do
if [ -e "/opt/bunkerweb/${folder}" ] ; then
do_and_check_cmd rm -rf "/opt/bunkerweb/${folder}"
fi
do_and_check_cmd mkdir "/${folder}"
do_and_check_cmd chown root:nginx "/${folder}"
do_and_check_cmd chmod 770 "/${folder}"
do_and_check_cmd ln -s "/$folder" "/opt/bunkerweb/$folder"
done
# prepare /var/log
rm -f /var/log/nginx/*
ln -s /proc/1/fd/2 /var/log/nginx/error.log
ln -s /proc/1/fd/2 /var/log/nginx/modsec_audit.log
ln -s /proc/1/fd/1 /var/log/nginx/access.log
ln -s /proc/1/fd/1 /var/log/nginx/jobs.log
fi
# We're done
cd "$old_dir"
cleanup
echo " bunkerweb successfully installed !"

View File

@ -1,56 +0,0 @@
#!/bin/bash
. /opt/bunkerweb/helpers/utils.sh
log "SCHEDULER" "" "Doing a restart ..."
# Kill the running scheduler
retry=0
if [ -f "/opt/bunkerweb/tmp/scheduler.pid" ] ; then
kill -s TERM "$(cat /opt/bunkerweb/tmp/scheduler.pid)"
ret=$?
if [ $ret -ne 0 ] ; then
log "SCHEDULER" "❌" "Error while sending signal to running scheduler (exit status = $ret)"
exit 1
fi
while [ -f "/opt/bunkerweb/tmp/scheduler.pid" ] && [ $retry -lt 3 ] ; do
echo log "SCHEDULER" "" "Waiting for scheduler to stop ..."
sleep 5
retry=$((retry + 1))
done
if [ $retry -eq 3 ] ; then
log "SCHEDULER" "❌" "Timeout while waiting while waiting for scheduler to stop"
exit 1
fi
fi
if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] && [ "$AUTOCONF_MODE" != "yes" ] ; then
VARIABLES_PATH="/etc/nginx/variables.env"
fi
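# Note : VARIABLES_PATH is only set outside the Swarm/Kubernetes/autoconf
# integrations ; in those modes the scheduler is started without --variables.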
# Run jobs once in foreground
log "SCHEDULER" "" "Executing jobs ..."
if [ -v VARIABLES_PATH ] ; then
/opt/bunkerweb/scheduler/main.py --variables $VARIABLES_PATH --run
else
/opt/bunkerweb/scheduler/main.py --run
fi
ret=$?
if [ $ret -ne 0 ] ; then
log "SCHEDULER" "❌" "Error while running jobs (exit status = $ret)"
exit 1
fi
# Run jobs scheduler in background
if [ -v VARIABLES_PATH ] ; then
/opt/bunkerweb/scheduler/main.py --variables $VARIABLES_PATH &
else
/opt/bunkerweb/scheduler/main.py &
fi
ret=$?
if [ $ret -ne 0 ] ; then
log "SCHEDULER" "❌" "Error while starting scheduler (exit status = $ret)"
exit 1
fi
exit 0

View File

@ -1,71 +0,0 @@
#!/bin/bash
function do_and_check_cmd() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
output=$("$@" 2>&1)
ret="$?"
if [ $ret -ne 0 ] ; then
echo "❌ Error from command : $*"
echo "$output"
exit $ret
fi
#echo $output
return 0
}
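# Illustrative usage (not part of the original script) : run a command from a
# given directory and abort with its output on failure :
#   CHANGE_DIR="/tmp" do_and_check_cmd tar xzf archive.tar.gz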
# Check if we are root
if [ $(id -u) -ne 0 ] ; then
echo "❌ Run me as root"
exit 1
fi
# Detect OS
OS=""
if [ "$(grep Debian /etc/os-release)" != "" ] ; then
OS="debian"
elif [ "$(grep Ubuntu /etc/os-release)" != "" ] ; then
OS="ubuntu"
elif [ "$(grep CentOS /etc/os-release)" != "" ] ; then
OS="centos"
fi
if [ "$OS" = "" ] ; then
echo "❌ Unsupported Operating System"
exit 1
fi
# Stop nginx
systemctl status nginx > /dev/null 2>&1
if [ $? -eq 0 ] ; then
echo " Stop nginx service"
do_and_check_cmd systemctl stop nginx
fi
# Reload old nginx.service file
echo " Restore old nginx service"
do_and_check_cmd mv /lib/systemd/system/nginx.service.bak /lib/systemd/system/nginx.service
do_and_check_cmd systemctl daemon-reload
# Remove UI service
systemctl status bunkerweb-ui > /dev/null 2>&1
if [ $? -eq 0 ] ; then
echo " Stop bunkerweb-ui service"
do_and_check_cmd systemctl stop bunkerweb-ui
fi
echo " Remove bunkerweb-ui service"
do_and_check_cmd systemctl disable bunkerweb-ui
do_and_check_cmd rm -f /lib/systemd/system/bunkerweb-ui.service
do_and_check_cmd systemctl daemon-reload
do_and_check_cmd systemctl reset-failed
do_and_check_cmd sed -i "s@nginx ALL=(root:root) NOPASSWD: /opt/bunkerweb/ui/linux.sh@@" /etc/sudoers
# Remove /opt/bunkerweb
if [ -e "/opt/bunkerweb" ] ; then
echo " Remove /opt/bunkerweb"
do_and_check_cmd rm -rf /opt/bunkerweb
fi
# We're done
echo " BunkerWeb successfully uninstalled"

View File

@ -1,183 +0,0 @@
local datastore = require "datastore"
local utils = require "utils"
local cjson = require "cjson"
local plugins = require "plugins"
local upload = require "resty.upload"
local logger = require "logger"
local api = { global = { GET = {}, POST = {}, PUT = {}, DELETE = {} } }
api.response = function(self, http_status, api_status, msg)
local resp = {}
resp["status"] = api_status
resp["msg"] = msg
return http_status, resp
end
api.global.GET["^/ping$"] = function(api)
return api:response(ngx.HTTP_OK, "success", "pong")
end
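-- Example response body, as encoded by do_api_call below :
-- {"status":"success","msg":"pong"}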
api.global.POST["^/jobs$"] = function(api)
-- ngx.req.read_body()
-- local data = ngx.req.get_body_data()
-- if not data then
-- local data_file = ngx.req.get_body_file()
-- if data_file then
-- local file = io.open(data_file)
-- data = file:read("*a")
-- file:close()
-- end
-- end
-- local ok, env = pcall(cjson.decode, data)
-- if not ok then
-- return api:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "can't decode JSON : " .. env)
-- end
-- local file = io.open("/opt/bunkerweb/tmp/jobs.env", "w+")
-- for k, v in pairs(env) do
-- file:write(k .. "=" .. v .. "\n")
-- end
-- file:close()
local status = os.execute("/opt/bunkerweb/helpers/scheduler-restart.sh")
if status == 0 then
return api:response(ngx.HTTP_OK, "success", "jobs executed and scheduler started")
end
return api:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "exit status = " .. tostring(status))
end
api.global.POST["^/reload$"] = function(api)
local status = os.execute("/usr/sbin/nginx -s reload")
if status == 0 then
return api:response(ngx.HTTP_OK, "success", "reload successful")
end
return api:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "exit status = " .. tostring(status))
end
api.global.POST["^/stop$"] = function(api)
local status = os.execute("/usr/sbin/nginx -s quit")
if status == 0 then
return api:response(ngx.HTTP_OK, "success", "stop successful")
end
return api:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "exit status = " .. tostring(status))
end
api.global.POST["^/confs$"] = function(api)
local tmp = "/opt/bunkerweb/tmp/api_" .. ngx.var.uri:sub(2) .. ".tar.gz"
local destination = "/opt/bunkerweb/" .. ngx.var.uri:sub(2)
if ngx.var.uri == "/confs" then
destination = "/etc/nginx"
elseif ngx.var.uri == "/data" then
destination = "/data"
elseif ngx.var.uri == "/cache" then
destination = "/data/cache"
elseif ngx.var.uri == "/custom_configs" then
destination = "/data/configs"
end
local form, err = upload:new(4096)
if not form then
return api:response(ngx.HTTP_BAD_REQUEST, "error", err)
end
form:set_timeout(1000)
local file = io.open(tmp, "w+")
while true do
local typ, res, err = form:read()
if not typ then
file:close()
return api:response(ngx.HTTP_BAD_REQUEST, "error", err)
end
if typ == "eof" then
break
end
if typ == "body" then
file:write(res)
end
end
file:flush()
file:close()
local status = os.execute("rm -rf " .. destination .. "/*")
if status ~= 0 then
return api:response(ngx.HTTP_BAD_REQUEST, "error", "can't remove old files")
end
status = os.execute("tar xzf " .. tmp .. " -C " .. destination)
if status ~= 0 then
return api:response(ngx.HTTP_BAD_REQUEST, "error", "can't extract archive")
end
return api:response(ngx.HTTP_OK, "success", "saved data at " .. destination)
end
api.global.POST["^/data$"] = api.global.POST["^/confs$"]
api.global.POST["^/cache$"] = api.global.POST["^/confs$"]
api.global.POST["^/custom_configs$"] = api.global.POST["^/confs$"]
api.global.POST["^/unban$"] = function(api)
ngx.req.read_body()
local data = ngx.req.get_body_data()
if not data then
local data_file = ngx.req.get_body_file()
if data_file then
local file = io.open(data_file)
data = file:read("*a")
file:close()
end
end
local ok, ip = pcall(cjson.decode, data)
if not ok then
return api:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "can't decode JSON : " .. env)
end
datastore:delete("bans_ip_" .. ip["ip"])
return api:response(ngx.HTTP_OK, "success", "ip " .. ip["ip"] .. " unbanned")
end
api.is_allowed_ip = function(self)
local data, err = datastore:get("api_whitelist_ip")
if not data then
return false, "can't access api_allowed_ips in datastore"
end
if utils.is_ip_in_networks(ngx.var.remote_addr, cjson.decode(data).data) then
return true, "ok"
end
return false, "IP is not in API_WHITELIST_IP"
end
api.do_api_call = function(self)
if self.global[ngx.var.request_method] ~= nil then
for uri, api_fun in pairs(self.global[ngx.var.request_method]) do
if string.match(ngx.var.uri, uri) then
local status, resp = api_fun(self)
local ret = true
if status ~= ngx.HTTP_OK then
ret = false
end
return ret, resp["msg"], status, cjson.encode(resp)
end
end
end
local list, err = plugins:list()
if not list then
local status, resp = self:response(ngx.HTTP_INTERNAL_SERVER_ERROR, "error", "can't list loaded plugins : " .. err)
return false, resp["msg"], ngx.HTTP_INTERNAL_SERVER_ERROR, resp
end
for i, plugin in ipairs(list) do
if pcall(require, plugin.id .. "/" .. plugin.id) then
local plugin_lua = require(plugin.id .. "/" .. plugin.id)
if plugin_lua.api ~= nil then
local matched, status, resp = plugin_lua.api()
if matched then
local ret = true
if status ~= ngx.HTTP_OK then
ret = false
end
return ret, resp["msg"], status, cjson.encode(resp)
end
end
end
end
local resp = {}
resp["status"] = "error"
resp["msg"] = "not found"
return false, "error", ngx.HTTP_NOT_FOUND, cjson.encode(resp)
end
return api

View File

@ -1,6 +0,0 @@
local geoip = require "geoip.mmdb"
return {
country_db = geoip.load_database("/opt/bunkerweb/cache/country.mmdb"),
asn_db = geoip.load_database("/opt/bunkerweb/cache/asn.mmdb")
}

View File

@ -1,365 +0,0 @@
local datastore = require "datastore"
local ipmatcher = require "resty.ipmatcher"
local cjson = require "cjson"
local resolver = require "resty.dns.resolver"
local mmdb = require "mmdb"
local logger = require "logger"
local utils = {}
utils.set_values = function()
local reserved_ips = {
"0.0.0.0/8",
"10.0.0.0/8",
"100.64.0.0/10",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/24",
"192.88.99.0/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"233.252.0.0/24",
"240.0.0.0/4",
"255.255.255.255/32"
}
local ok, err = datastore:set("misc_reserved_ips", cjson.encode({data = reserved_ips}))
if not ok then
return false, err
end
local var_resolvers, err = datastore:get("variable_DNS_RESOLVERS")
if not var_resolvers then
return false, err
end
local list_resolvers = {}
for str_resolver in var_resolvers:gmatch("%S+") do
table.insert(list_resolvers, str_resolver)
end
ok, err = datastore:set("misc_resolvers", cjson.encode(list_resolvers))
if not ok then
return false, err
end
return true, "success"
end
utils.get_variable = function(var, site_search)
if site_search == nil then
site_search = true
end
local value, err = datastore:get("variable_" .. var)
if not value then
return nil, "Can't access variable " .. var .. " from datastore : " .. err
end
if site_search then
local multisite, err = datastore:get("variable_MULTISITE")
if not multisite then
return nil, "Can't access variable MULTISITE from datastore : " .. err
end
if multisite == "yes" and ngx.var.server_name then
local value_site, err = datastore:get("variable_" .. ngx.var.server_name .. "_" .. var)
if value_site then
value = value_site
end
end
end
return value, "success"
end
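-- Example : with MULTISITE=yes and ngx.var.server_name set to "app1.example.com",
-- get_variable("USE_GZIP") reads variable_USE_GZIP first and then overrides it
-- with variable_app1.example.com_USE_GZIP if that key exists.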
utils.has_variable = function(var, value)
local check_value, err = datastore:get("variable_" .. var)
if not check_value then
return nil, "Can't access variable " .. var .. " from datastore : " .. err
end
local multisite, err = datastore:get("variable_MULTISITE")
if not multisite then
return nil, "Can't access variable MULTISITE from datastore : " .. err
end
if multisite == "yes" then
local servers, err = datastore:get("variable_SERVER_NAME")
if not servers then
return nil, "Can't access variable SERVER_NAME from datastore : " .. err
end
for server in servers:gmatch("%S+") do
local check_value_site, err = datastore:get("variable_" .. server .. "_" .. var)
if check_value_site and check_value_site == value then
return true, "success"
end
end
return false, "success"
end
return check_value == value, "success"
end
utils.has_not_variable = function(var, value)
local check_value, err = datastore:get("variable_" .. var)
if not check_value then
return nil, "Can't access variable " .. var .. " from datastore : " .. err
end
local multisite, err = datastore:get("variable_MULTISITE")
if not multisite then
return nil, "Can't access variable MULTISITE from datastore : " .. err
end
if multisite == "yes" then
local servers, err = datastore:get("variable_SERVER_NAME")
if not servers then
return nil, "Can't access variable SERVER_NAME from datastore : " .. err
end
for server in servers:gmatch("%S+") do
local check_value_site, err = datastore:get("variable_" .. server .. "_" .. var)
if check_value_site and check_value_site ~= value then
return true, "success"
end
end
return false, "success"
end
return check_value ~= value, "success"
end
function utils.get_multiple_variables(vars)
local keys = datastore:keys()
local result = {}
for i, key in ipairs(keys) do
for j, var in ipairs(vars) do
local _, _, server, subvar = key:find("variable_(.*)_?(" .. var .. "_?%d*)")
if subvar then
if not server or server == "" then
server = "global"
else
server = server:sub(1, -2)
end
if result[server] == nil then
result[server] = {}
end
local value, err = datastore:get(key)
if not value then
return nil, err
end
result[server][subvar] = value
end
end
end
return result
end
utils.is_ip_in_networks = function(ip, networks)
local ipm, err = ipmatcher.new(networks)
if not ipm then
return nil, "can't instantiate ipmatcher : " .. err
end
local matched, err = ipm:match(ip)
if err then
return nil, "can't check ip : " .. err
end
return matched
end
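-- Example : utils.is_ip_in_networks("10.1.2.3", {"10.0.0.0/8"}) returns true.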
utils.is_ipv4 = function(ip)
return ipmatcher.parse_ipv4(ip)
end
utils.is_ipv6 = function(ip)
return ipmatcher.parse_ipv6(ip)
end
utils.ip_is_global = function(ip)
local data, err = datastore:get("misc_reserved_ips")
if not data then
return nil, "can't get reserved ips : " .. err
end
local ok, reserved_ips = pcall(cjson.decode, data)
if not ok then
return nil, "can't decode json : " .. reserved_ips
end
local ipm, err = ipmatcher.new(reserved_ips.data)
if not ipm then
return nil, "can't instantiate ipmatcher : " .. err
end
local matched, err = ipm:match(ip)
if err then
return nil, "can't check ip : " .. err
end
return not matched, "success"
end
utils.get_integration = function()
local integration, err = datastore:get("misc_integration")
if integration then
return integration
end
local var, err = datastore:get("variable_SWARM_MODE")
if var == "yes" then
integration = "swarm"
else
local var, err = datastore:get("variable_KUBERNETES_MODE")
if var == "yes" then
integration = "kubernetes"
else
local f, err = io.open("/etc/os-release", "r")
if f then
local data = f:read("*a")
if data:find("Alpine") then
integration = "docker"
else
integration = "unknown"
end
f:close()
else
integration = "unknown"
end
end
end
local ok, err = datastore:set("misc_integration", integration)
if not ok then
logger.log(ngx.ERR, "UTILS", "Can't cache integration to datastore : " .. err)
end
return integration
end
utils.get_version = function()
local version, err = datastore:get("misc_version")
if version then
return version
end
local f, err = io.open("/opt/bunkerweb/VERSION", "r")
if not f then
logger.log(ngx.ERR, "UTILS", "Can't read VERSION file : " .. err)
return "unknown"
end
version = f:read("*a")
f:close()
local ok, err = datastore:set("misc_version", version)
if not ok then
logger.log(ngx.ERR, "UTILS", "Can't cache version to datastore : " .. err)
end
return version
end
utils.get_reason = function()
if ngx.var.reason and ngx.var.reason ~= "" then
return ngx.var.reason
end
if os.getenv("REASON") == "modsecurity" then
return "modsecurity"
end
local banned, err = datastore:get("bans_ip_" .. ngx.var.remote_addr)
if banned then
return banned
end
if ngx.status == utils.get_deny_status() then
return "unknown"
end
return nil
end
utils.get_rdns = function(ip)
local str_resolvers, err = datastore:get("misc_resolvers")
if not str_resolvers then
return false, err
end
local resolvers = cjson.decode(str_resolvers)
local rdns, err = resolver:new{
nameservers = resolvers,
retrans = 1,
timeout = 1000
}
if not rdns then
return false, err
end
local answers, err = rdns:reverse_query(ip)
if not answers then
return false, err
end
if answers.errcode then
return false, answers.errstr
end
for i, answer in ipairs(answers) do
if answer.ptrdname then
return answer.ptrdname, "success"
end
end
return false, nil
end
utils.get_ips = function(fqdn, resolvers)
local str_resolvers, err = datastore:get("misc_resolvers")
if not str_resolvers then
return false, err
end
local resolvers = cjson.decode(str_resolvers)
local rdns, err = resolver:new{
nameservers = resolvers,
retrans = 1,
timeout = 1000
}
if not rdns then
return false, err
end
local answers, err = rdns:query(fqdn, nil, {})
if not answers then
return false, err
end
if answers.errcode then
return {}, answers.errstr
end
local ips = {}
for i, answer in ipairs(answers) do
if answer.address then
table.insert(ips, answer.address)
end
end
return ips, "success"
end
utils.get_country = function(ip)
if not mmdb.country_db then
return false, "mmdb country not loaded"
end
local ok, result, err = pcall(mmdb.country_db.lookup, mmdb.country_db, ip)
if not ok then
return nil, result
end
if not result then
return nil, err
end
return result.country.iso_code, "success"
end
utils.get_asn = function(ip)
if not mmdb.asn_db then
return false, "mmdb asn not loaded"
end
local ok, result, err = pcall(mmdb.asn_db.lookup, mmdb.asn_db, ip)
if not ok then
return nil, result
end
if not result then
return nil, err
end
return result.autonomous_system_number, "success"
end
utils.rand = function(nb)
local charset = {}
for i = 48, 57 do table.insert(charset, string.char(i)) end
for i = 65, 90 do table.insert(charset, string.char(i)) end
for i = 97, 122 do table.insert(charset, string.char(i)) end
local result = ""
for i = 1, nb do
result = result .. charset[math.random(1, #charset)]
end
return result
end
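-- Example : utils.rand(8) returns an 8-character alphanumeric string (digits
-- plus upper and lower case letters, per the charset built above).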
utils.get_deny_status = function ()
local status, err = datastore:get("variable_DENY_HTTP_STATUS")
if not status then
logger.log(ngx.ERR, "UTILS", "Can't get DENY_HTTP_STATUS variable " .. err)
return 403
end
return tonumber(status)
end
return utils

File diff suppressed because it is too large

View File

@ -580,7 +580,7 @@ spec:
livenessProbe:
exec:
command:
- /opt/bunkerweb/helpers/healthcheck.sh
- /usr/share/bunkerweb/helpers/healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 1
@ -588,7 +588,7 @@ spec:
readinessProbe:
exec:
command:
- /opt/bunkerweb/helpers/healthcheck.sh
- /usr/share/bunkerweb/helpers/healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 1
timeoutSeconds: 1
@ -803,9 +803,9 @@ Repositories of Linux packages for BunkerWeb are available on [PackageCloud](htt
The first step is to install NGINX 1.20.2 using the repository of your choice or by [compiling it from source](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/#compiling-and-installing-from-source).
The target installation folder of BunkerWeb is located at `/opt/bunkerweb`; let's create it :
The target installation folder of BunkerWeb is located at `/usr/share/bunkerweb`; let's create it :
```shell
mkdir /opt/bunkerweb
mkdir /usr/share/bunkerweb
```
You can now clone the BunkerWeb project to the `/tmp` folder :
@ -813,40 +813,44 @@ Repositories of Linux packages for BunkerWeb are available on [PackageCloud](htt
https://github.com/bunkerity/bunkerweb.git /tmp/bunkerweb
```
BunkerWeb needs some dependencies to be compiled and installed to `/opt/bunkerweb/deps`; the easiest way to do it is by executing the [install.sh helper script](https://github.com/bunkerity/bunkerweb/blob/master/deps/install.sh) (please note that you will need to install additional packages, which is not covered in this procedure and depends on your own system) :
BunkerWeb needs some dependencies to be compiled and installed to `/usr/share/bunkerweb/deps`; the easiest way to do it is by executing the [install.sh helper script](https://github.com/bunkerity/bunkerweb/blob/master/deps/install.sh) (please note that you will need to install additional packages, which is not covered in this procedure and depends on your own system) :
```
mkdir /opt/bunkerweb/deps && \
mkdir /usr/share/bunkerweb/deps && \
/tmp/bunkerweb/deps/install.sh
```
Additional Python dependencies need to be installed into the `/opt/bunkerweb/deps/python` folder :
Additional Python dependencies need to be installed into the `/usr/share/bunkerweb/deps/python` folder :
```shell
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /tmp/bunkerweb/deps/requirements.txt && \
pip install --no-cache-dir --target /opt/bunkerweb/deps/python -r /tmp/bunkerweb/ui/requirements.txt
mkdir /usr/share/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /usr/share/bunkerweb/deps/python -r /tmp/bunkerweb/deps/requirements.txt && \
pip install --no-cache-dir --target /usr/share/bunkerweb/deps/python -r /tmp/bunkerweb/ui/requirements.txt && \
pip install --no-cache-dir gunicorn
```
Once dependencies are installed, you will be able to copy the BunkerWeb sources to the target `/opt/bunkerweb` folder :
Once dependencies are installed, you will be able to copy the BunkerWeb sources to the target `/usr/share/bunkerweb` folder :
```shell
for src in api cli confs core gen helpers job lua misc utils ui settings.json VERSION linux/variables.env linux/ui.env linux/scripts ; do
cp -r /tmp/bunkerweb/${src} /opt/bunkerweb
cp -r /tmp/bunkerweb/${src} /usr/share/bunkerweb
done
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin
cp /usr/share/bunkerweb/helpers/bwcli /usr/bin
```
Additional folders also need to be created :
```shell
mkdir /opt/bunkerweb/{configs,cache,plugins,tmp}
mkdir -p /etc/bunkerweb/configs && \
mkdir -p /var/cache/bunkerweb && \
mkdir -p /etc/bunkerweb/plugins && \
mkdir -p /var/tmp/bunkerweb
```
Permissions need to be fixed :
```shell
find /opt/bunkerweb -path /opt/bunkerweb/deps -prune -o -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -path /opt/bunkerweb/deps -prune -o -type d -exec chmod 0750 {} \; && \
find /opt/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
chmod 770 /opt/bunkerweb/cache /opt/bunkerweb/tmp && \
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/job/main.py /opt/bunkerweb/cli/main.py /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/scripts/*.sh /usr/local/bin/bwcli /opt/bunkerweb/ui/main.py && \
chown -R root:nginx /opt/bunkerweb
find /usr/share/bunkerweb -path /usr/share/bunkerweb/deps -prune -o -type f -exec chmod 0740 {} \; && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/deps -prune -o -type d -exec chmod 0750 {} \; && \
find /usr/share/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
chmod 770 /var/cache/bunkerweb /var/tmp/bunkerweb && \
chmod 750 /usr/share/bunkerweb/gen/main.py /usr/share/bunkerweb/scheduler/main.py /usr/share/bunkerweb/cli/main.py /usr/share/bunkerweb/helpers/*.sh /usr/share/bunkerweb/scripts/*.sh /usr/bin/bwcli /usr/share/bunkerweb/ui/main.py && \
chown -R root:nginx /usr/share/bunkerweb
```
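As a quick sanity check (assuming the `nginx` user already exists on the system), you can verify that the unprivileged user is able to traverse and read the installed tree :
```shell
sudo -u nginx test -x /usr/share/bunkerweb && echo "traverse OK"
sudo -u nginx test -r /usr/share/bunkerweb/settings.json && echo "read OK"
```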
Last but not least, you will need to set up systemd unit files :
@ -859,7 +863,7 @@ Repositories of Linux packages for BunkerWeb are available on [PackageCloud](htt
systemctl enable bunkerweb-ui
```
The configuration of BunkerWeb is done by editing the `/opt/bunkerweb/variables.env` file :
The configuration of BunkerWeb is done by editing the `/etc/bunkerweb/variables.env` file :
```conf
MY_SETTING_1=value1
@ -923,16 +927,16 @@ ansible-playbook -i inventory.yml playbook.yml
Configuration of BunkerWeb is done by using specific role variables :
| Name | Type | Description | Default value |
|:-----:|:-----:|--------------|----------------|
| `bunkerweb_version` | string | Version of BunkerWeb to install. | `1.4.3` |
| `nginx_version` | string | Version of NGINX to install. | `1.20.2` |
| `freeze_versions` | boolean | Prevent upgrade of BunkerWeb and NGINX when performing packages upgrades. | `true` |
| `variables_env` | string | Path of the variables.env file to configure BunkerWeb. | `files/variables.env` |
| `enable_ui` | boolean | Activate the web UI. | `false` |
| `custom_ui` | string | Path of the ui.env file to configure the web UI. | `files/ui.env` |
| `custom_configs_path` | Dictionary | Each entry is a path of the folder containing custom configurations. Keys are the type of custom configs : `http`, `server-http`, `modsec`, `modsec-crs` and `default-server-http` | empty values |
| `custom_www` | string | Path of the www directory to upload. | empty value |
| `custom_plugins` | string | Path of the plugins directory to upload. | empty value |
| `custom_www_owner` | string | Default owner for www files and folders. | `nginx` |
| `custom_www_group` | string | Default group for www files and folders. | `nginx` |
| Name | Type | Description | Default value |
| :-------------------: | :--------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- |
| `bunkerweb_version` | string | Version of BunkerWeb to install. | `1.4.3` |
| `nginx_version` | string | Version of NGINX to install. | `1.20.2` |
| `freeze_versions` | boolean | Prevent upgrade of BunkerWeb and NGINX when performing packages upgrades. | `true` |
| `variables_env` | string | Path of the variables.env file to configure BunkerWeb. | `files/variables.env` |
| `enable_ui` | boolean | Activate the web UI. | `false` |
| `custom_ui` | string | Path of the ui.env file to configure the web UI. | `files/ui.env` |
| `custom_configs_path` | Dictionary | Each entry is a path of the folder containing custom configurations. Keys are the type of custom configs : `http`, `server-http`, `modsec`, `modsec-crs` and `default-server-http` | empty values |
| `custom_www` | string | Path of the www directory to upload. | empty value |
| `custom_plugins` | string | Path of the plugins directory to upload. | empty value |
| `custom_www_owner` | string | Default owner for www files and folders. | `nginx` |
| `custom_www_group` | string | Default group for www files and folders. | `nginx` |
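As an illustration (hypothetical values), role variables can also be overridden directly on the command line when running the playbook :
```shell
ansible-playbook -i inventory.yml playbook.yml \
  -e "bunkerweb_version=1.4.3" \
  -e "enable_ui=true" \
  -e "variables_env=files/variables.env"
```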

View File

@ -41,13 +41,13 @@ print(
# Print global settings
print("## Global settings\n")
with open("settings.json", "r") as f:
with open("src/common/settings.json", "r") as f:
print_md_table(loads(f.read()))
# Print core settings
print("## Core settings\n")
core_settings = {}
for core in glob("./core/*/plugin.json"):
for core in glob("src/common/core/*/plugin.json"):
with open(core, "r") as f:
core_plugin = loads(f.read())
if len(core_plugin["settings"]) > 0:

View File

@ -8,6 +8,6 @@ fi
if [ "$1" == "dev" ] ; then
mike deploy --push --update-aliases dev
else
mike deploy --push --update-aliases "$(cat VERSION | sed -E 's/([0-9]+)\.([0-9]+)\.([0-9]+)/\1\.\2/')" latest
mike deploy --push --update-aliases "$(cat src/VERSION | sed -E 's/([0-9]+)\.([0-9]+)\.([0-9]+)/\1\.\2/')" latest
mike set-default --push latest
fi

View File

@ -113,7 +113,7 @@ The first step is to install the plugin by putting the plugin files inside the c
=== "Linux"
When using the [Linux integration](/1.4/integrations/#linux), plugins must be written to the `/opt/bunkerweb/plugins` folder :
When using the [Linux integration](/1.4/integrations/#linux), plugins must be written to the `/etc/bunkerweb/plugins` folder :
```shell
git clone https://github.com/bunkerity/bunkerweb-plugins && \
cp -rp ./bunkerweb-plugins/* /etc/bunkerweb/plugins

View File

@ -247,7 +247,7 @@ You will find more settings about reverse proxy in the [settings section](/1.4/s
python3 -m http.server -b 127.0.0.1
```
Configuration of BunkerWeb is done by editing the `/opt/bunkerweb/variables.env` file :
Configuration of BunkerWeb is done by editing the `/etc/bunkerweb/variables.env` file :
```conf
SERVER_NAME=www.example.com
HTTP_PORT=80
@ -852,7 +852,7 @@ You will find more settings about reverse proxy in the [settings section](/1.4/s
python3 -m http.server -b 127.0.0.1 8003
```
Configuration of BunkerWeb is done by editing the `/opt/bunkerweb/variables.env` file :
Configuration of BunkerWeb is done by editing the `/etc/bunkerweb/variables.env` file :
```conf
SERVER_NAME=app1.example.com app2.example.com app3.example.com
HTTP_PORT=80
@ -1076,7 +1076,7 @@ REAL_IP_HEADER=X-Forwarded-For
=== "Linux"
You will need to add the settings to the `/opt/bunkerweb/variables.env` file :
You will need to add the settings to the `/etc/bunkerweb/variables.env` file :
```conf
...
USE_REAL_IP=yes
@ -1248,7 +1248,7 @@ REAL_IP_HEADER=proxy_protocol
=== "Linux"
You will need to add the settings to the `/opt/bunkerweb/variables.env` file :
You will need to add the settings to the `/etc/bunkerweb/variables.env` file :
```conf
...
USE_REAL_IP=yes
@ -1502,7 +1502,7 @@ Some integrations offer a more convenient way of applying configurations such as
=== "Linux"
When using the [Linux integration](/1.4/integrations/#linux), custom configurations must be written to the /opt/bunkerweb/configs folder.
When using the [Linux integration](/1.4/integrations/#linux), custom configurations must be written to the /etc/bunkerweb/configs folder.
Here is an example for server-http/hello-world.conf :
```conf
@ -1516,8 +1516,8 @@ Some integrations offer a more convenient way of applying configurations such as
Because BunkerWeb runs as an unprivileged user (nginx:nginx), you will need to edit the permissions :
```shell
chown -R root:nginx /opt/bunkerweb/configs && \
chmod -R 770 /opt/bunkerweb/configs
chown -R root:nginx /etc/bunkerweb/configs && \
chmod -R 770 /etc/bunkerweb/configs
```
Don't forget to restart the BunkerWeb service once it's done.
@ -1793,9 +1793,9 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
We will assume that you already have the [Linux integration](/1.4/integrations/#linux) stack running on your machine.
By default, BunkerWeb will search for web files inside the `/opt/bunkerweb/www` folder. You can use it to store your PHP application. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
By default, BunkerWeb will search for web files inside the `/var/www/html` folder. You can use it to store your PHP application. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/opt/bunkerweb/www` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/var/www/html` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
```ini
...
[www]
@ -1813,14 +1813,14 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
systemctl restart php-fpm
```
Once your application is copied to the `/opt/bunkerweb/www` folder, you will need to fix the permissions so BunkerWeb (user/group nginx) can at least read files and list folders and PHP-FPM (user/group www-data) is the owner of the files and folders :
Once your application is copied to the `/var/www/html` folder, you will need to fix the permissions so BunkerWeb (user/group nginx) can at least read files and list folders and PHP-FPM (user/group www-data) is the owner of the files and folders :
```shell
chown -R www-data:nginx /opt/bunkerweb/www && \
find /opt/bunkerweb/www -type f -exec chmod 0640 {} \; && \
find /opt/bunkerweb/www -type d -exec chmod 0750 {} \;
chown -R www-data:nginx /var/www/html && \
find /var/www/html -type f -exec chmod 0640 {} \; && \
find /var/www/html -type d -exec chmod 0750 {} \;
```
You can now edit the `/opt/bunkerweb/variables.env` file :
You can now edit the `/etc/bunkerweb/variables.env` file :
```env
HTTP_PORT=80
HTTPS_PORT=443
@ -1828,7 +1828,7 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
LOCAL_PHP=/run/php/php-fpm.sock
LOCAL_PHP_PATH=/opt/bunkerweb/www/
LOCAL_PHP_PATH=/var/www/html/
```
Let's check the status of BunkerWeb :
@ -1847,9 +1847,9 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
=== "Ansible"
By default, BunkerWeb will search for web files inside the `/opt/bunkerweb/www` folder. You can use it to store your PHP application. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
By default, BunkerWeb will search for web files inside the `/var/www/html` folder. You can use it to store your PHP application. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/opt/bunkerweb/www` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/var/www/html` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
```ini
...
[www]
@ -1873,10 +1873,10 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
LOCAL_PHP=/run/php/php-fpm.sock
LOCAL_PHP_PATH=/opt/bunkerweb/www/
LOCAL_PHP_PATH=/var/www/html/
```
The `custom_site` variable can be used to specify a directory containing your application files (e.g : `my_app`) that will be copied to `/opt/bunkerweb/www` and the `custom_www_owner` variable contains the owner that should be set for the files and folders. Here is an example using the Ansible inventory :
The `custom_site` variable can be used to specify a directory containing your application files (e.g : `my_app`) that will be copied to `/var/www/html` and the `custom_www_owner` variable contains the owner that should be set for the files and folders. Here is an example using the Ansible inventory :
```ini
[mybunkers]
192.168.0.42 variables_env="{{ playbook_dir }}/my_variables.env" custom_www="{{ playbook_dir }}/my_app" custom_www_owner="www-data"
@ -2298,9 +2298,9 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
We will assume that you already have the [Linux integration](/1.4/integrations/#linux) stack running on your machine.
By default, BunkerWeb will search for web files inside the `/opt/bunkerweb/www` folder. You can use it to store your PHP applications : each application will be in its own subfolder named the same as the primary server name. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
By default, BunkerWeb will search for web files inside the `/var/www/html` folder. You can use it to store your PHP applications : each application will be in its own subfolder named the same as the primary server name. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/opt/bunkerweb/www` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/var/www/html` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
```ini
...
[www]
@ -2318,14 +2318,14 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
systemctl restart php-fpm
```
Once your application is copied to the `/opt/bunkerweb/www` folder, you will need to fix the permissions so BunkerWeb (user/group nginx) can at least read files and list folders and PHP-FPM (user/group www-data) is the owner of the files and folders :
Once your application is copied to the `/var/www/html` folder, you will need to fix the permissions so BunkerWeb (user/group nginx) can at least read files and list folders and PHP-FPM (user/group www-data) is the owner of the files and folders :
```shell
chown -R www-data:nginx /opt/bunkerweb/www && \
find /opt/bunkerweb/www -type f -exec chmod 0640 {} \; && \
find /opt/bunkerweb/www -type d -exec chmod 0750 {} \;
chown -R www-data:nginx /var/www/html && \
find /var/www/html -type f -exec chmod 0640 {} \; && \
find /var/www/html -type d -exec chmod 0750 {} \;
```
You can now edit the `/opt/bunkerweb/variables.env` file :
You can now edit the `/etc/bunkerweb/variables.env` file :
```env
HTTP_PORT=80
HTTPS_PORT=443
@ -2334,11 +2334,11 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
MULTISITE=yes
AUTO_LETS_ENCRYPT=yes
app1.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app1.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app1.example.com
app1.example.com_LOCAL_PHP_PATH=/var/www/html/app1.example.com
app2.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app2.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app2.example.com
app2.example.com_LOCAL_PHP_PATH=/var/www/html/app2.example.com
app3.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app3.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app3.example.com
app3.example.com_LOCAL_PHP_PATH=/var/www/html/app3.example.com
```
Let's check the status of BunkerWeb :
@ -2357,9 +2357,9 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
=== "Ansible"
By default, BunkerWeb will search for web files inside the `/opt/bunkerweb/www` folder. You can use it to store your PHP application : each application will be in its own subfolder named the same as the primary server name. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
By default, BunkerWeb will search for web files inside the `/var/www/html` folder. You can use it to store your PHP application : each application will be in its own subfolder named the same as the primary server name. Please note that you will need to configure your PHP-FPM service to get or set the user/group of the running processes and the UNIX socket file used to communicate with BunkerWeb.
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/opt/bunkerweb/www` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
First of all, you will need to make sure that your PHP-FPM instance can access the files inside the `/var/www/html` folder and also that BunkerWeb can access the UNIX socket file in order to communicate with PHP-FPM. We recommend setting a different user such as `www-data` for the PHP-FPM service and giving the nginx group access to the UNIX socket file. Here is the corresponding PHP-FPM configuration :
```ini
...
[www]
@ -2384,14 +2384,14 @@ BunkerWeb supports PHP using external or remote [PHP-FPM](https://www.php.net/ma
MULTISITE=yes
AUTO_LETS_ENCRYPT=yes
app1.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app1.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app1.example.com
app1.example.com_LOCAL_PHP_PATH=/var/www/html/app1.example.com
app2.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app2.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app2.example.com
app2.example.com_LOCAL_PHP_PATH=/var/www/html/app2.example.com
app3.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app3.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app3.example.com
app3.example.com_LOCAL_PHP_PATH=/var/www/html/app3.example.com
```
The `custom_site` variable can be used to specify a directory containing your application files (e.g : `my_app`) that will be copied to `/opt/bunkerweb/www` and the `custom_www_owner` variable contains the owner that should be set for the files and folders. Here is an example using the Ansible inventory :
The `custom_site` variable can be used to specify a directory containing your application files (e.g : `my_app`) that will be copied to `/var/www/html` and the `custom_www_owner` variable contains the owner that should be set for the files and folders. Here is an example using the Ansible inventory :
```ini
[mybunkers]
192.168.0.42 variables_env="{{ playbook_dir }}/my_variables.env" custom_www="{{ playbook_dir }}/my_app" custom_www_owner="www-data"

View File

@ -1,5 +1,5 @@
mkdocs==1.4.2
mkdocs-material==8.5.7
mkdocs-material==8.5.9
pytablewriter==0.64.2
mike==1.1.2
jinja2<3.1.0

View File

@ -14,7 +14,7 @@ When settings are considered as "multiple", it means that you can have multiple
| Setting | Default | Context |Multiple| Description |
|-----------------------|------------------------------------------------------------------------------------------------------------------------|---------|--------|--------------------------------------------------|
|`TEMP_NGINX` |`no` |global |no |internal-use |
|`IS_LOADING` |`no` |global |no |Internal use : set to yes when BW is loading. |
|`NGINX_PREFIX` |`/etc/nginx/` |global |no |Where nginx will search for configurations. |
|`HTTP_PORT` |`8080` |global |no |HTTP port number which bunkerweb binds to. |
|`HTTPS_PORT` |`8443` |global |no |HTTPS port number which bunkerweb binds to. |
@ -73,20 +73,30 @@ When settings are considered as "multiple", it means that you can have multiple
### Blacklist
| Setting | Default | Context |Multiple| Description |
|---------------------------|------------------------------------------------------------------------------------------------------------------------------|---------|--------|------------------------------------------------------------------------------|
|`USE_BLACKLIST` |`yes` |multisite|no |Activate blacklist feature. |
|`BLACKLIST_IP_URLS` |`https://www.dan.me.uk/torlist/?exit` |global |no |List of URLs, separated with spaces, containing bad IP/network to block. |
|`BLACKLIST_IP` | |multisite|no |List of IP/network, separated with spaces, to block. |
|`BLACKLIST_RDNS` |`.shodan.io .censys.io` |multisite|no |List of reverse DNS suffixes, separated with spaces, to block. |
|`BLACKLIST_RDNS_URLS` | |global |no |List of URLs, separated with spaces, containing reverse DNS suffixes to block.|
|`BLACKLIST_RDNS_GLOBAL` |`yes` |multisite|no |Only perform RDNS blacklist checks on global IP addresses. |
|`BLACKLIST_ASN` | |multisite|no |List of ASN numbers, separated with spaces, to block. |
|`BLACKLIST_ASN_URLS` | |global |no |List of URLs, separated with spaces, containing ASN to block. |
|`BLACKLIST_USER_AGENT` | |multisite|no |List of User-Agent, separated with spaces, to block. |
|`BLACKLIST_USER_AGENT_URLS`|`https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list`|global |no |List of URLs, separated with spaces, containing bad User-Agent to block. |
|`BLACKLIST_URI` | |multisite|no |List of URI, separated with spaces, to block. |
|`BLACKLIST_URI_URLS` | |global |no |List of URLs, separated with spaces, containing bad URI to block. |
| Setting | Default | Context |Multiple| Description |
|----------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------|--------|------------------------------------------------------------------------------------------------|
|`USE_BLACKLIST` |`yes` |multisite|no |Activate blacklist feature. |
|`BLACKLIST_IP_URLS` |`https://www.dan.me.uk/torlist/?exit` |global |no |List of URLs, separated with spaces, containing bad IP/network to block. |
|`BLACKLIST_IP` | |multisite|no |List of IP/network, separated with spaces, to block. |
|`BLACKLIST_RDNS` |`.shodan.io .censys.io` |multisite|no |List of reverse DNS suffixes, separated with spaces, to block. |
|`BLACKLIST_RDNS_URLS` | |global |no |List of URLs, separated with spaces, containing reverse DNS suffixes to block. |
|`BLACKLIST_RDNS_GLOBAL` |`yes` |multisite|no |Only perform RDNS blacklist checks on global IP addresses. |
|`BLACKLIST_ASN` | |multisite|no |List of ASN numbers, separated with spaces, to block. |
|`BLACKLIST_ASN_URLS` | |global |no |List of URLs, separated with spaces, containing ASN to block. |
|`BLACKLIST_USER_AGENT` | |multisite|no |List of User-Agent, separated with spaces, to block. |
|`BLACKLIST_USER_AGENT_URLS` |`https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list`|global |no |List of URLs, separated with spaces, containing bad User-Agent to block. |
|`BLACKLIST_URI` | |multisite|no |List of URI, separated with spaces, to block. |
|`BLACKLIST_URI_URLS` | |global |no |List of URLs, separated with spaces, containing bad URI to block. |
|`BLACKLIST_IGNORE_IP_URLS` | |global |no |List of URLs, separated with spaces, containing IP/network to ignore in the blacklist. |
|`BLACKLIST_IGNORE_IP` | |multisite|no |List of IP/network, separated with spaces, to ignore in the blacklist. |
|`BLACKLIST_IGNORE_RDNS` | |multisite|no |List of reverse DNS suffixes, separated with spaces, to ignore in the blacklist. |
|`BLACKLIST_IGNORE_RDNS_URLS` | |global |no |List of URLs, separated with spaces, containing reverse DNS suffixes to ignore in the blacklist.|
|`BLACKLIST_IGNORE_ASN` | |multisite|no |List of ASN numbers, separated with spaces, to ignore in the blacklist. |
|`BLACKLIST_IGNORE_ASN_URLS` | |global |no |List of URLs, separated with spaces, containing ASN to ignore in the blacklist. |
|`BLACKLIST_IGNORE_USER_AGENT` | |multisite|no |List of User-Agent, separated with spaces, to ignore in the blacklist. |
|`BLACKLIST_IGNORE_USER_AGENT_URLS`| |global |no |List of URLs, separated with spaces, containing User-Agent to ignore in the blacklist. |
|`BLACKLIST_IGNORE_URI` | |multisite|no |List of URI, separated with spaces, to ignore in the blacklist. |
|`BLACKLIST_IGNORE_URI_URLS` | |global |no |List of URLs, separated with spaces, containing URI to ignore in the blacklist. |
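As an example (hypothetical values), the ignore settings can be combined with the default lists to exempt a trusted scanner from the blacklist :
```conf
USE_BLACKLIST=yes
BLACKLIST_IGNORE_IP=203.0.113.42
BLACKLIST_IGNORE_RDNS=.scanner.example
```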
### Brotli
@ -140,6 +150,12 @@ When settings are considered as "multiple", it means that you can have multiple
|`CUSTOM_HTTPS_CERT`| |multisite|no |Full path of the certificate or bundle file.|
|`CUSTOM_HTTPS_KEY` | |multisite|no |Full path of the key file. |
### DB
| Setting | Default |Context|Multiple| Description |
|--------------|----------------------------|-------|--------|--------------------------------------------------|
|`DATABASE_URI`|`sqlite:////data/db.sqlite3`|global |no |The database URI, following the sqlalchemy format.|
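The URI follows the standard sqlalchemy format, so a remote database can be used instead of the local SQLite file (hypothetical credentials) :
```conf
DATABASE_URI=mariadb+pymysql://bunkerweb:changeme@192.168.0.10:3306/db
```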
### DNSBL
| Setting | Default | Context |Multiple| Description |
@ -153,6 +169,23 @@ When settings are considered as "multiple", it means that you can have multiple
|--------|-------|---------|--------|-------------------------------------------------------------------------------------------------|
|`ERRORS`| |multisite|no |List of HTTP error code and corresponding error pages (404=/my404.html 403=/errors/403.html ...).|
### Greylist
| Setting |Default| Context |Multiple| Description |
|--------------------------|-------|---------|--------|----------------------------------------------------------------------------------------------|
|`USE_GREYLIST` |`no` |multisite|no |Activate greylist feature. |
|`GREYLIST_IP_URLS` | |global |no |List of URLs, separated with spaces, containing good IP/network to put into the greylist. |
|`GREYLIST_IP` | |multisite|no |List of IP/network, separated with spaces, to put into the greylist. |
|`GREYLIST_RDNS` | |multisite|no |List of reverse DNS suffixes, separated with spaces, to put into the greylist. |
|`GREYLIST_RDNS_URLS` | |global |no |List of URLs, separated with spaces, containing reverse DNS suffixes to put into the greylist.|
|`GREYLIST_RDNS_GLOBAL` |`yes` |multisite|no |Only perform RDNS greylist checks on global IP addresses. |
|`GREYLIST_ASN` | |multisite|no |List of ASN numbers, separated with spaces, to put into the greylist. |
|`GREYLIST_ASN_URLS` | |global |no |List of URLs, separated with spaces, containing ASN to put into the greylist. |
|`GREYLIST_USER_AGENT` | |multisite|no |List of User-Agent, separated with spaces, to put into the greylist. |
|`GREYLIST_USER_AGENT_URLS`| |global |no |List of URLs, separated with spaces, containing good User-Agent to put into the greylist. |
|`GREYLIST_URI` | |multisite|no |List of URI, separated with spaces, to put into the greylist. |
|`GREYLIST_URI_URLS`       |       |global   |no      |List of URLs, separated with spaces, containing good URI to put into the greylist.            |
### Gzip
| Setting | Default | Context |Multiple| Description |
@ -214,7 +247,7 @@ When settings are considered as "multiple", it means that you can have multiple
|`ALLOWED_METHODS` |`GET\|POST\|HEAD` |multisite|no |Allowed HTTP methods to be sent by clients. |
|`MAX_CLIENT_SIZE` |`10m` |multisite|no |Maximum body size (0 for infinite). |
|`SERVE_FILES` |`yes` |multisite|no |Serve files from the local folder. |
|`ROOT_FOLDER` | |multisite|no |Root folder containing files to serve (/opt/bunkerweb/www/{server_name} if unset). |
|`ROOT_FOLDER` | |multisite|no |Root folder containing files to serve (/var/www/html/{server_name} if unset). |
|`HTTPS_PROTOCOLS` |`TLSv1.2 TLSv1.3` |multisite|no |The supported versions of TLS. We recommend the default value `TLSv1.2 TLSv1.3` for compatibility reasons. |
|`HTTP2` |`yes` |multisite|no |Support HTTP2 protocol when HTTPS is enabled. |
|`LISTEN_HTTP` |`yes` |multisite|no |Respond to (insecure) HTTP requests. |
@ -228,11 +261,13 @@ When settings are considered as "multiple", it means that you can have multiple
### ModSecurity
| Setting | Default | Context |Multiple| Description |
|-----------------------------------|----------------|---------|--------|--------------------------------------------------|
| `USE_MODSECURITY` | `yes` |multisite|no | Enable ModSecurity WAF. |
| `USE_MODSECURITY_CRS` | `yes` |multisite|no | Enable OWASP Core Rule Set. |
| `MODSECURITY_SEC_AUDIT_ENGINE` | `RelevantOnly` |multisite|no | SecAuditEngine directive of ModSecurity. |
| Setting | Default | Context |Multiple| Description |
|---------------------------------|--------------|---------|--------|------------------------------------------|
|`USE_MODSECURITY` |`yes` |multisite|no |Enable ModSecurity WAF. |
|`USE_MODSECURITY_CRS` |`yes` |multisite|no |Enable OWASP Core Rule Set. |
|`MODSECURITY_SEC_AUDIT_ENGINE` |`RelevantOnly`|multisite|no |SecAuditEngine directive of ModSecurity. |
|`MODSECURITY_SEC_RULE_ENGINE` |`On` |multisite|no |SecRuleEngine directive of ModSecurity. |
|`MODSECURITY_SEC_AUDIT_LOG_PARTS`|`ABCFHZ` |multisite|no |SecAuditLogParts directive of ModSecurity.|
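For instance, a detection-only sketch useful while tuning rules (switch `MODSECURITY_SEC_RULE_ENGINE` back to `On` to block requests again) :
```conf
# Log matches without blocking while you tune the rule set
USE_MODSECURITY=yes
USE_MODSECURITY_CRS=yes
MODSECURITY_SEC_RULE_ENGINE=DetectionOnly
MODSECURITY_SEC_AUDIT_ENGINE=RelevantOnly
```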
### PHP

View File

@ -115,7 +115,7 @@ Let's take the following logs as an example of ModSecurity detection using defau
```log
2022/04/26 12:01:10 [warn] 85#85: *11 ModSecurity: Warning. Matched "Operator `PmFromFile' with parameter `lfi-os-files.data' against variable `ARGS:id' (Value: `/etc/passwd' )
[file "/opt/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf"]
[file "/usr/share/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf"]
[line "78"]
[id "930120"]
[rev ""]
@ -139,7 +139,7 @@ Let's take the following logs as an example of ModSecurity detection using defau
[ref "o1,10v9,11t:utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase"],
client: 172.17.0.1, server: localhost, request: "GET /?id=/etc/passwd HTTP/1.1", host: "localhost"
2022/04/26 12:01:10 [warn] 85#85: *11 ModSecurity: Warning. Matched "Operator `PmFromFile' with parameter `unix-shell.data' against variable `ARGS:id' (Value: `/etc/passwd' )
[file "/opt/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf"]
[file "/usr/share/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf"]
[line "480"]
[id "932160"]
[rev ""]
@ -163,7 +163,7 @@ Let's take the following logs as an example of ModSecurity detection using defau
[ref "o1,10v9,11t:urlDecodeUni,t:cmdLine,t:normalizePath,t:lowercase"],
client: 172.17.0.1, server: localhost, request: "GET /?id=/etc/passwd HTTP/1.1", host: "localhost"
2022/04/26 12:01:10 [error] 85#85: *11 [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 2). Matched "Operator `Ge' with parameter `5' against variable `TX:ANOMALY_SCORE' (Value: `10' )
[file "/opt/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-949-BLOCKING-EVALUATION.conf"]
[file "/usr/share/bunkerweb/core/modsecurity/files/coreruleset/rules/REQUEST-949-BLOCKING-EVALUATION.conf"]
[line "80"]
[id "949110"]
[rev ""]

View File

@ -196,7 +196,7 @@ Because the web UI is a web application, the recommended installation procedure
The installation of the web UI using the [Linux integration](/1.4/integrations/#linux) is pretty straightforward because it is installed with BunkerWeb.
The first thing to do is to edit the BunkerWeb configuration located at **/opt/bunkerweb/variables.env** to add settings related to the web UI :
The first thing to do is to edit the BunkerWeb configuration located at **/etc/bunkerweb/variables.env** to add settings related to the web UI :
```conf
HTTP_PORT=80
HTTPS_PORT=443
@ -225,7 +225,7 @@ Because the web UI is a web application, the recommended installation procedure
systemctl restart bunkerweb
```
You can edit the **/opt/bunkerweb/ui.env** file containing the settings of the web UI :
You can edit the **/etc/bunkerweb/ui.env** file containing the settings of the web UI :
```conf
ADMIN_USERNAME=admin
ADMIN_PASSWORD=changeme
@ -234,7 +234,7 @@ Because the web UI is a web application, the recommended installation procedure
Important things to note :
* `http(s)://bwadmin.example.com/changeme/` is the full base URL of the web UI (must match the (sub)domain and /changeme URL used in **/opt/bunkerweb/variables.env**)
* `http(s)://bwadmin.example.com/changeme/` is the full base URL of the web UI (must match the (sub)domain and /changeme URL used in **/etc/bunkerweb/variables.env**)
* replace the username `admin` and password `changeme` with strong ones (see the sketch below)
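A minimal **ui.env** sketch following these notes (credentials are placeholders; `ABSOLUTE_URI` is assumed to be the setting holding the full base URL) :
```conf
# Placeholder credentials : generate your own strong values
ADMIN_USERNAME=bwadmin
ADMIN_PASSWORD=Use-A-Long-Random-Passphrase
ABSOLUTE_URI=https://bwadmin.example.com/changeme/
```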
Restart the BunkerWeb UI service and you are now ready to access it :

View File

@ -21,4 +21,4 @@ systemctl stop bunkerweb
systemctl stop haproxy
systemctl start haproxy
echo "hello" > /opt/bunkerweb/www/index.html
echo "hello" > /var/www/html/index.html

View File

@ -13,7 +13,7 @@ else
echo "❌ No PHP user found"
exit 1
fi
cp -r ./bw-data/www/* /opt/bunkerweb/www
chown -R $user:nginx /opt/bunkerweb/www
find /opt/bunkerweb/www -type f -exec chmod 0640 {} \;
find /opt/bunkerweb/www -type d -exec chmod 0750 {} \;
cp -r ./bw-data/www/* /var/www/html
chown -R $user:nginx /var/www/html
find /var/www/html -type f -exec chmod 0640 {} \;
find /var/www/html -type d -exec chmod 0750 {} \;

View File

@ -12,8 +12,8 @@ app1.example.com_USE_CORS=yes
app1.example.com_CORS_ALLOW_ORIGIN=https://app2.example.com
app1.example.com_ALLOWED_METHODS=GET|POST|HEAD|OPTIONS
app1.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app1.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app1.example.com
app1.example.com_LOCAL_PHP_PATH=/var/www/html/app1.example.com
app2.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app2.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app2.example.com
app2.example.com_LOCAL_PHP_PATH=/var/www/html/app2.example.com
app3.example.com_LOCAL_PHP=/run/php/php-fpm.sock
app3.example.com_LOCAL_PHP_PATH=/opt/bunkerweb/www/app3.example.com
app3.example.com_LOCAL_PHP_PATH=/var/www/html/app3.example.com

Some files were not shown because too many files have changed in this diff.