Merge pull request #323 from TheophileDiot/1.5

Linting + starting to migrate bunkerweb to 1.5
commit 54530d535d
Théophile Diot, 2022-10-19 17:39:19 +02:00, committed by GitHub
4949 changed files with 20734 additions and 15390 deletions

View File

@@ -3,8 +3,7 @@ name: Bug report
about: Something is not working as expected
title: "[BUG]"
labels: bug
assignees: ''
assignees: ""
---
**Description**

View File

@@ -3,8 +3,7 @@ name: Documentation enhancement
about: Error in the documentation or something is missing
title: "[DOC]"
labels: documentation
assignees: ''
assignees: ""
---
**Description**

View File

@@ -3,8 +3,7 @@ name: Feature request
about: Suggest an idea for this project
title: "[FEATURE]"
labels: enhancement
assignees: ''
assignees: ""
---
**What's needed and why ?**

View File

@@ -5,7 +5,6 @@ on:
branches: [dev]
jobs:
# Build for amd64
build-bw-amd64:
runs-on: ubuntu-latest
@@ -62,7 +61,6 @@ jobs:
build-bw-386:
runs-on: ubuntu-latest
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -292,10 +290,16 @@ jobs:
# Run tests
tests:
needs: [build-bw-amd64, build-bw-ubuntu, build-bw-debian, build-bw-centos, build-bw-fedora]
needs:
[
build-bw-amd64,
build-bw-ubuntu,
build-bw-debian,
build-bw-centos,
build-bw-fedora,
]
runs-on: [self-hosted, X64]
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -318,7 +322,7 @@ jobs:
else
echo "BUILD_MODE=dev" >> $GITHUB_ENV
fi
# Import images to local registry
- name: Import BW image
run: docker pull ${{ secrets.PRIVATE_REGISTRY }}/infra/bunkerweb-tests-amd64:latest && docker tag ${{ secrets.PRIVATE_REGISTRY }}/infra/bunkerweb-tests-amd64:latest 10.20.1.1:5000/bw-tests:latest && docker push 10.20.1.1:5000/bw-tests:latest
@@ -398,7 +402,6 @@ jobs:
needs: [tests, build-bw-386, build-bw-arm]
runs-on: ubuntu-latest
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -471,7 +474,6 @@ jobs:
needs: tests
runs-on: [self-hosted, X64]
steps:
- name: Check out repository code
uses: actions/checkout@v3
@@ -545,7 +547,6 @@ jobs:
needs: push-docker
runs-on: ubuntu-latest
steps:
- name: Checkout source code
uses: actions/checkout@v3
@@ -556,4 +557,4 @@ jobs:
kubeconfig: ${{ secrets.KUBE_CONFIG_STAGING }}
- name: k8s deploy (staging)
run: kubectl rollout restart deployment bunkerweb-controller && kubectl rollout restart daemonset bunkerweb

View File

@@ -5,7 +5,6 @@ on:
branches: [master]
jobs:
# Build for amd64
build-bw-amd64:
runs-on: ubuntu-latest
@@ -59,7 +58,6 @@ jobs:
build-bw-386:
runs-on: ubuntu-latest
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -275,10 +273,16 @@ jobs:
# Run tests
tests:
needs: [build-bw-amd64, build-bw-ubuntu, build-bw-debian, build-bw-centos, build-bw-fedora]
needs:
[
build-bw-amd64,
build-bw-ubuntu,
build-bw-debian,
build-bw-centos,
build-bw-fedora,
]
runs-on: [self-hosted, X64]
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -301,7 +305,7 @@ jobs:
else
echo "BUILD_MODE=dev" >> $GITHUB_ENV
fi
# Import images to local registry
- name: Import BW image
run: docker pull ${{ secrets.PRIVATE_REGISTRY }}/infra/bunkerweb-tests-amd64:latest && docker tag ${{ secrets.PRIVATE_REGISTRY }}/infra/bunkerweb-tests-amd64:latest 10.20.1.1:5000/bw-tests:latest && docker push 10.20.1.1:5000/bw-tests:latest
@@ -355,13 +359,13 @@ jobs:
# Run tests
#- name: Run Docker tests
#run: ./tests/main.py "docker"
#- name: Run Autoconf tests
#run: ./tests/main.py "autoconf"
#- name: Run Swarm tests
#run: ./tests/main.py "swarm"
#- name: Run Kubernetes tests
#run: ./tests/main.py "kubernetes"
- name: Generate Linux packages and build test images
run: ./tests/linux.sh ${{ env.BUILD_MODE }}
- name: Run Linux Ubuntu tests
@@ -378,7 +382,6 @@ jobs:
needs: [tests, build-bw-386, build-bw-arm]
runs-on: ubuntu-latest
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
@@ -455,7 +458,6 @@ jobs:
needs: tests
runs-on: [self-hosted, X64]
steps:
- name: Check out repository code
uses: actions/checkout@v3
@@ -526,18 +528,18 @@ jobs:
# Deploy to staging infrastructure
# deploy:
# needs: push-docker
# runs-on: ubuntu-latest
# steps:
# - name: Checkout source code
# uses: actions/checkout@v3
# - name: k8s login (staging)
# uses: azure/k8s-set-context@v2
# with:
# method: kubeconfig
# kubeconfig: ${{ secrets.KUBE_CONFIG_STAGING }}
# - name: k8s deploy (staging)
# run: kubectl rollout restart deployment bunkerweb-controller && kubectl rollout restart daemonset bunkerweb

.gitignore (vendored, 2 changes)
View File

@@ -3,3 +3,5 @@ site/
.vscode/
**/__pycache__/
ui/env/
docs/env/
env/

.prettierignore (new file, 12 changes)
View File

@@ -0,0 +1,12 @@
docs/
env/
*/env/
*.min*
bw/core/modsecurity/
bw/deps/src/
mkdocs.yml
CHANGELOG.md
CONTRIBUTING.md
LICENSE.md
README.md
SECURITY.md

View File

@@ -1,7 +1,7 @@
FROM nginx:1.20.2-alpine AS builder
# Copy dependencies sources folder
COPY deps /tmp/bunkerweb/deps
COPY bw/deps /tmp/bunkerweb/deps
# Compile and install dependencies
RUN apk add --no-cache --virtual build bash build autoconf libtool automake geoip-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev && \
@@ -11,10 +11,10 @@ RUN apk add --no-cache --virtual build bash build autoconf libtool automake geoi
apk del build
# Copy python requirements
COPY deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
COPY bw/deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
# Install python requirements
RUN apk add --no-cache --virtual build py3-pip gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
RUN apk add --no-cache --virtual build py3-pip g++ gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
pip install wheel && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
@@ -27,17 +27,14 @@ COPY --from=builder /opt/bunkerweb /opt/bunkerweb
# Copy files
# can't exclude deps from . so we are copying everything by hand
COPY api /opt/bunkerweb/api
COPY cli /opt/bunkerweb/cli
COPY confs /opt/bunkerweb/confs
COPY core /opt/bunkerweb/core
COPY gen /opt/bunkerweb/gen
COPY helpers /opt/bunkerweb/helpers
COPY job /opt/bunkerweb/job
COPY lua /opt/bunkerweb/lua
COPY misc /opt/bunkerweb/misc
COPY bw/api /opt/bunkerweb/api
COPY bw/core /opt/bunkerweb/core
COPY bw/cli /opt/bunkerweb/cli
COPY bw/helpers /opt/bunkerweb/helpers
COPY bw/lua /opt/bunkerweb/lua
COPY bw/misc /opt/bunkerweb/misc
COPY bw/temp_nginx /etc/nginx
COPY utils /opt/bunkerweb/utils
COPY settings.json /opt/bunkerweb/settings.json
COPY VERSION /opt/bunkerweb/VERSION
# Install runtime dependencies, pypi packages, move bwcli, create data folders and set permissions
@@ -46,8 +43,7 @@ RUN apk add --no-cache bash python3 libgcc libstdc++ openssl git && \
chmod 750 /opt/bunkerweb/modules && \
chmod 740 /opt/bunkerweb/modules/*.so && \
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin && \
mkdir /opt/bunkerweb/configs && \
for dir in $(echo "cache configs configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs letsencrypt plugins www") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
for dir in $(echo "cache configs configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs cache/letsencrypt plugins www") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
chown -R root:nginx /data && \
chmod -R 770 /data && \
mkdir /opt/bunkerweb/tmp && \
@@ -55,11 +51,9 @@ RUN apk add --no-cache bash python3 libgcc libstdc++ openssl git && \
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
chmod 770 /opt/bunkerweb/cache /opt/bunkerweb/tmp && \
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/job/main.py /opt/bunkerweb/cli/main.py /opt/bunkerweb/helpers/*.sh /usr/local/bin/bwcli /opt/bunkerweb/deps/python/bin/* && \
find /opt/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
chmod 750 /opt/bunkerweb/cli/main.py /opt/bunkerweb/helpers/*.sh /usr/local/bin/bwcli /opt/bunkerweb/deps/python/bin/* && \
chown root:nginx /usr/local/bin/bwcli && \
chown -R nginx:nginx /etc/nginx && \
ln -s /data/letsencrypt /etc/letsencrypt && \
mkdir /var/log/letsencrypt /var/lib/letsencrypt && \
chown root:nginx /var/log/letsencrypt /var/lib/letsencrypt && \
chmod 770 /var/log/letsencrypt /var/lib/letsencrypt && \
@@ -75,7 +69,7 @@ RUN apk add --no-cache bash python3 libgcc libstdc++ openssl git && \
# Fix CVEs
RUN apk add "freetype>=2.10.4-r3" "curl>=7.79.1-r2" "libcurl>=7.79.1-r2" "openssl>=1.1.1q-r0" "libssl1.1>=1.1.1q-r0" "libcrypto1.1>=1.1.1q-r0" "git>=2.32.3-r0" "ncurses-libs>=6.2_p20210612-r1" "ncurses-terminfo-base>=6.2_p20210612-r1" "zlib>=1.2.12-r2" "libxml2>=2.9.14-r1"
VOLUME /data
VOLUME /data /etc/nginx
EXPOSE 8080/tcp 8443/tcp
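For readers skimming the RUN instruction above, a Python rendering (a sketch, not part of the image build) of its data-folder loop: each /data subfolder is created and symlinked into /opt/bunkerweb, with letsencrypt now living under cache/:

```python
# Sketch: Python equivalent of the Dockerfile's mkdir/symlink loop above.
from os import makedirs, symlink

dirs = (
    "cache configs configs/http configs/stream configs/server-http "
    "configs/server-stream configs/default-server-http "
    "configs/default-server-stream configs/modsec configs/modsec-crs "
    "cache/letsencrypt plugins www"
).split()
for d in dirs:
    makedirs(f"/data/{d}", exist_ok=True)
    symlink(f"/data/{d}", f"/opt/bunkerweb/{d}")
```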

View File

@@ -1,32 +0,0 @@
from requests import request
class API :
def __init__(self, endpoint, host="bwapi") :
self.__endpoint = endpoint
self.__host = host
def get_endpoint(self) :
return self.__endpoint
def get_host(self) :
return self.__host
def request(self, method, url, data=None, files=None, timeout=(10, 30)) :
try :
headers = {}
headers["User-Agent"] = "bwapi"
headers["Host"] = self.__host
if type(data) is dict :
resp = request(method, self.__endpoint + url, json=data, timeout=timeout, headers=headers)
elif type(data) is bytes :
resp = request(method, self.__endpoint + url, data=data, timeout=timeout, headers=headers)
elif files is not None :
resp = request(method, self.__endpoint + url, files=files, timeout=timeout, headers=headers)
elif data is None :
resp = request(method, self.__endpoint + url, timeout=timeout, headers=headers)
else :
return False, "unsupported data type", None, None
except Exception as e :
return False, str(e), None, None
return True, "ok", resp.status_code, resp.json()
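For context, a hedged usage sketch of the removed API wrapper above; the endpoint URL and the /ping route are placeholder values, and request() returns the (success, error, status, json) tuple visible in the class:

```python
# Hypothetical usage of the removed API wrapper; endpoint and route are made up.
from API import API

api = API("http://bunkerweb:5000", host="bwapi")
ok, err, status, resp = api.request("GET", "/ping")
if not ok:
    print(f"request failed: {err}")
else:
    print(f"status={status} body={resp}")
```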

View File

@@ -1,246 +1,240 @@
from logging import Logger
from traceback import format_exc
from threading import Thread, Lock
from time import sleep
from subprocess import run, DEVNULL, STDOUT
from glob import glob
from shutil import rmtree
from os import makedirs, remove, listdir
from os import getenv, makedirs, remove, listdir
from os.path import dirname, isdir
from json import loads
from typing import Tuple
from API import API
from JobScheduler import JobScheduler
from ApiCaller import ApiCaller
from ConfigCaller import ConfigCaller
from logger import log
from Database import Database
from logger import setup_logger
class Config(ApiCaller, ConfigCaller) :
def __init__(self, ctrl_type, lock=None) :
class Config(ApiCaller, ConfigCaller):
def __init__(self, ctrl_type, lock=None):
ApiCaller.__init__(self)
ConfigCaller.__init__(self)
self.__ctrl_type = ctrl_type
self.__lock = lock
self.__logger = setup_logger("Config", getenv("LOG_LEVEL", "INFO"))
self.__db = None
self.__instances = []
self.__services = []
self.__configs = []
self.__config = {}
self.__scheduler = None
self.__scheduler_thread = None
self.__schedule = False
self.__schedule_lock = Lock()
def __get_full_env(self) :
def __get_full_env(self) -> dict:
env_instances = {}
for instance in self.__instances :
for variable, value in instance["env"].items() :
for instance in self.__instances:
for variable, value in instance["env"].items():
env_instances[variable] = value
env_services = {}
if not "SERVER_NAME" in env_instances :
if not "SERVER_NAME" in env_instances:
env_instances["SERVER_NAME"] = ""
for service in self.__services :
for variable, value in service.items() :
env_services[service["SERVER_NAME"].split(" ")[0] + "_" + variable] = value
if env_instances["SERVER_NAME"] != "" :
for service in self.__services:
for variable, value in service.items():
env_services[
f"{service['SERVER_NAME'].split(' ')[0]}_{variable}"
] = value
if env_instances["SERVER_NAME"] != "":
env_instances["SERVER_NAME"] += " "
env_instances["SERVER_NAME"] += service["SERVER_NAME"].split(" ")[0]
return self._full_env(env_instances, env_services)
def __scheduler_run_pending(self) :
schedule = True
while schedule :
self.__scheduler.run_pending()
sleep(1)
self.__schedule_lock.acquire()
schedule = self.__schedule
self.__schedule_lock.release()
def update_needed(self, instances, services, configs=None) :
if instances != self.__instances :
def update_needed(self, instances, services, configs=None) -> bool:
if instances != self.__instances:
return True
if services != self.__services :
if services != self.__services:
return True
if not configs is None and configs != self.__configs :
if not configs is None and configs != self.__configs:
return True
return False
def __get_config(self) :
def __get_config(self) -> dict:
config = {}
# extract instances variables
for instance in self.__instances :
for variable, value in instance["env"].items() :
for instance in self.__instances:
for variable, value in instance["env"].items():
config[variable] = value
# extract services variables
server_names = []
for service in self.__services :
for service in self.__services:
first_server = service["SERVER_NAME"].split(" ")[0]
if not first_server in server_names :
if not first_server in server_names:
server_names.append(first_server)
for variable, value in service.items() :
config[first_server + "_" + variable] = value
for variable, value in service.items():
config[f"{first_server}_{variable}"] = value
config["SERVER_NAME"] = " ".join(server_names)
return config
def __get_apis(self) :
def __get_apis(self) -> list:
apis = []
for instance in self.__instances :
endpoint = "http://" + instance["hostname"] + ":5000"
host = "bwapi"
if "API_SERVER_NAME" in instance["env"] :
host = instance["env"]["API_SERVER_NAME"]
for instance in self.__instances:
endpoint = f"http://{instance['hostname']}:{instance['env'].get('API_HTTP_PORT', '5000')}"
host = instance["env"].get("API_SERVER_NAME", "bwapi")
apis.append(API(endpoint, host=host))
return apis
def __write_configs(self) :
def __write_configs(self) -> Tuple[bool, list]:
ret = True
for config_type in self.__configs :
for file, data in self.__configs[config_type].items() :
path = "/data/configs/" + config_type + "/" + file
if not path.endswith(".conf") :
custom_configs = []
for config_type in self.__configs:
for file, data in self.__configs[config_type].items():
path = f"/data/configs/{config_type}/{file}"
if not path.endswith(".conf"):
path += ".conf"
makedirs(dirname(path), exist_ok=True)
try :
try:
mode = "w"
if type(data) is bytes :
if type(data) is bytes:
mode = "wb"
with open(path, mode) as f :
f.write(data)
except :
print(format_exc())
log("CONFIG", "", "Can't save file " + path)
ret = False
return ret
def __remove_configs(self) :
ret = True
for config_type in self.__configs :
for file, data in self.__configs[config_type].items() :
path = "/data/configs/" + config_type + "/" + file
if not path.endswith(".conf") :
path += ".conf"
try :
remove(path)
except :
with open(path, mode) as f:
f.write(data)
exploded = file.split("/")
custom_configs.append(
{
"value": data if mode == "w" else data.decode("utf-8"),
"exploded": [exploded[0], config_type, exploded[1]],
}
)
except:
print(format_exc())
log("CONFIG", "", "Can't remove file " + path)
self.__logger.error(f"Can't save file {path}")
ret = False
return ret, custom_configs
def __remove_configs(self) -> bool:
ret = True
for config_type in self.__configs:
for file, _ in self.__configs[config_type].items():
path = f"/data/configs/{config_type}/{file}"
if not path.endswith(".conf"):
path += ".conf"
try:
remove(path)
except:
print(format_exc())
self.__logger.error(f"Can't remove file {path}")
ret = False
check_empty_dirs = []
for type in ["server-http", "modsec", "modsec-crs"] :
check_empty_dirs.extend(glob("/data/configs/" + type + "/*"))
for check_empty_dir in check_empty_dirs :
if isdir(check_empty_dir) and len(listdir(check_empty_dir)) == 0 :
try :
for _type in ["server-http", "modsec", "modsec-crs"]:
check_empty_dirs.extend(glob(f"/data/configs/{type}/*"))
for check_empty_dir in check_empty_dirs:
if isdir(check_empty_dir) and len(listdir(check_empty_dir)) == 0:
try:
rmtree(check_empty_dir)
except :
except:
print(format_exc())
log("CONFIG", "", "Can't remove directory " + check_empty_dir)
self.__logger.error(f"Can't remove directory {check_empty_dir}")
ret = False
return ret
def apply(self, instances, services, configs=None) :
def apply(self, instances, services, configs=None) -> bool:
success = True
# stop scheduler just in case caller didn't do it
self.stop_scheduler()
# remove old autoconf configs if it exists
if self.__configs:
ret = self.__remove_configs()
if not ret :
if not ret:
success = False
log("CONFIG", "", "removing custom configs failed, configuration will not work as expected...")
self.__logger.error(
"removing custom configs failed, configuration will not work as expected...",
)
# update values
self.__instances = instances
self.__services = services
self.__configs = configs
self.__config = self.__get_full_env()
if self.__db is None:
self.__db = Database(
self.__logger, sqlalchemy_string=self.__config.get("DATABASE_URI", None)
)
self._set_apis(self.__get_apis())
# write configs
if configs != None :
ret = self.__write_configs()
if not ret :
if configs != None:
ret = self.__db.save_config(self.__config, "autoconf")
if ret:
self.__logger.error(
f"Can't save autoconf config in database: {ret}",
)
ret, custom_configs = self.__write_configs()
if not ret:
success = False
log("CONFIG", "", "saving custom configs failed, configuration will not work as expected...")
self.__logger.error(
"saving custom configs failed, configuration will not work as expected...",
)
ret = self.__db.save_custom_configs(custom_configs, "autoconf")
if ret:
self.__logger.error(
f"Can't save autoconf custom configs in database: {ret}",
)
else:
ret = self.__db.save_config({}, "autoconf")
if ret:
self.__logger.error(
f"Can't remove autoconf config from the database: {ret}",
)
# get env
env = self.__get_full_env()
# run jobs once
i = 1
for instance in self.__instances :
endpoint = "http://" + instance["hostname"] + ":5000"
host = "bwapi"
if "API_SERVER_NAME" in instance["env"] :
host = instance["env"]["API_SERVER_NAME"]
env["CLUSTER_INSTANCE_" + str(i)] = endpoint + " " + host
for instance in self.__instances:
endpoint = f"http://{instance['hostname']}:{instance['env'].get('API_HTTP_PORT', '5000')}"
host = instance["env"].get("API_SERVER_NAME", "bwapi")
env[f"CLUSTER_INSTANCE_{i}"] = f"{endpoint} {host}"
i += 1
if self.__scheduler is None :
self.__scheduler = JobScheduler(env=env, lock=self.__lock, apis=self._get_apis())
ret = self.__scheduler.reload(env, apis=self._get_apis())
if not ret :
success = False
log("CONFIG", "", "scheduler.reload() failed, configuration will not work as expected...")
# write config to /tmp/variables.env
with open("/tmp/variables.env", "w") as f :
for variable, value in self.__config.items() :
f.write(variable + "=" + value + "\n")
with open("/tmp/variables.env", "w") as f:
for variable, value in self.__config.items():
f.write(f"{variable}={value}\n")
# run the generator
cmd = "python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx --variables /tmp/variables.env"
cmd = f"python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx --variables /tmp/variables.env --method autoconf"
proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
if proc.returncode != 0 :
if proc.returncode != 0:
success = False
log("CONFIG", "", "config generator failed, configuration will not work as expected...")
self.__logger.error(
"config generator failed, configuration will not work as expected...",
)
# cmd = "chown -R root:101 /etc/nginx"
# run(cmd.split(" "), stdin=DEVNULL, stdout=DEVNULL, stderr=STDOUT)
# cmd = "chmod -R 770 /etc/nginx"
# run(cmd.split(" "), stdin=DEVNULL, stdout=DEVNULL, stderr=STDOUT)
# send nginx configs
# send data folder
# reload nginx
ret = self._send_files("/etc/nginx", "/confs")
if not ret :
if not ret:
success = False
log("CONFIG", "", "sending nginx configs failed, configuration will not work as expected...")
ret = self._send_files("/data", "/data")
if not ret :
self.__logger.error(
"sending nginx configs failed, configuration will not work as expected...",
)
ret = self._send_files("/data/configs", "/custom_configs")
if not ret:
success = False
log("CONFIG", "", "sending custom configs failed, configuration will not work as expected...")
self.__logger.error(
"sending custom configs failed, configuration will not work as expected...",
)
ret = self._send_to_apis("POST", "/reload")
if not ret :
if not ret:
success = False
log("CONFIG", "", "reload failed, configuration will not work as expected...")
self.__logger.error(
"reload failed, configuration will not work as expected...",
)
return success
def start_scheduler(self) :
if self.__scheduler_thread is not None and self.__scheduler_thread.is_alive() :
raise Exception("scheduler is already running, can't run it twice")
self.__schedule = True
self.__scheduler_thread = Thread(target=self.__scheduler_run_pending)
self.__scheduler_thread.start()
def stop_scheduler(self) :
if self.__scheduler_thread is not None and self.__scheduler_thread.is_alive() :
self.__schedule_lock.acquire()
self.__schedule = False
self.__schedule_lock.release()
self.__scheduler_thread.join()
self.__scheduler_thread = None
def reload_scheduler(self, env) :
if self.__scheduler_thread is None :
return self.__scheduler.reload(env=env, apis=self._get_apis())
def __get_scheduler(self, env) :
self.__schedule_lock.acquire()
if self.__schedule :
self.__schedule_lock.release()
raise Exception("can't create new scheduler, old one is still running...")
self.__schedule_lock.release()
return JobScheduler(env=env, lock=self.__lock, apis=self._get_apis())
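A minimal sketch of the new persistence flow added above, assuming the Database API exactly as it appears in this diff: save_config() returns a truthy error message on failure rather than a success flag. The SERVER_NAME value is illustrative:

```python
# Sketch, assuming the Database API shown in the diff above.
from os import getenv
from Database import Database
from logger import setup_logger

logger = setup_logger("Config", getenv("LOG_LEVEL", "INFO"))
db = Database(logger, sqlalchemy_string=getenv("DATABASE_URI", None))
# save_config() returns an error message (truthy) on failure
err = db.save_config({"SERVER_NAME": "www.example.com"}, "autoconf")
if err:
    logger.error(f"Can't save autoconf config in database: {err}")
```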

View File

@@ -1,85 +1,98 @@
from abc import ABC, abstractmethod
from os import getenv
from time import sleep
from Config import Config
from logger import log
from logger import setup_logger
class Controller(ABC) :
def __init__(self, ctrl_type, lock=None) :
class Controller(ABC):
def __init__(self, ctrl_type, lock=None):
self._type = ctrl_type
self._instances = []
self._services = []
self._supported_config_types = ["http", "stream", "server-http", "server-stream", "default-server-http", "modsec", "modsec-crs"]
self._supported_config_types = [
"http",
"stream",
"server-http",
"server-stream",
"default-server-http",
"modsec",
"modsec-crs",
]
self._configs = {}
for config_type in self._supported_config_types :
for config_type in self._supported_config_types:
self._configs[config_type] = {}
self._config = Config(ctrl_type, lock)
self.__logger = setup_logger("Controller", getenv("LOG_LEVEL", "INFO"))
def wait(self, wait_time) :
while True :
def wait(self, wait_time):
while True:
self._instances = self.get_instances()
if len(self._instances) == 0 :
log("CONTROLLER", "⚠️", "No instance found, waiting " + str(wait_time) + "s ...")
if len(self._instances) == 0:
self.__logger.warning(
f"No instance found, waiting {wait_time}s ...",
)
sleep(wait_time)
continue
all_ready = True
for instance in self._instances :
if not instance["health"] :
log("CONTROLLER", "⚠️", "Instance " + instance["name"] + " is not ready, waiting " + str(wait_time) + "s ...")
for instance in self._instances:
if not instance["health"]:
self.__logger.warning(
f"Instance {instance['name']} is not ready, waiting {wait_time}s ...",
)
sleep(wait_time)
all_ready = False
break
if all_ready :
if all_ready:
break
return self._instances
@abstractmethod
def _get_controller_instances(self) :
pass
@abstractmethod
def _to_instances(self, controller_instance) :
def _get_controller_instances(self):
pass
def get_instances(self) :
@abstractmethod
def _to_instances(self, controller_instance):
pass
def get_instances(self):
instances = []
for controller_instance in self._get_controller_instances() :
for instance in self._to_instances(controller_instance) :
for controller_instance in self._get_controller_instances():
for instance in self._to_instances(controller_instance):
instances.append(instance)
return instances
@abstractmethod
def _get_controller_services(self) :
pass
@abstractmethod
def _to_services(self, controller_service) :
def _get_controller_services(self):
pass
@abstractmethod
def _get_static_services(self) :
def _to_services(self, controller_service):
pass
def get_services(self) :
@abstractmethod
def _get_static_services(self):
pass
def get_services(self):
services = []
for controller_service in self._get_controller_services() :
for service in self._to_services(controller_service) :
for controller_service in self._get_controller_services():
for service in self._to_services(controller_service):
services.append(service)
for static_service in self._get_static_services() :
for static_service in self._get_static_services():
services.append(static_service)
return services
@abstractmethod
def get_configs(self) :
def get_configs(self):
pass
@abstractmethod
def apply_config(self) :
def apply_config(self):
pass
@abstractmethod
def process_events(self) :
pass
def process_events(self):
pass
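To make the abstract interface above concrete, a hypothetical no-op subclass; it only illustrates which methods a real driver (Docker, Swarm, Ingress) must implement:

```python
# Hypothetical minimal Controller subclass; returned values are placeholders.
from Controller import Controller

class DummyController(Controller):
    def __init__(self):
        super().__init__("dummy")

    def _get_controller_instances(self):
        return []

    def _to_instances(self, controller_instance):
        return []

    def _get_controller_services(self):
        return []

    def _to_services(self, controller_service):
        return []

    def _get_static_services(self):
        return []

    def get_configs(self):
        # one empty dict per supported config type
        return {t: {} for t in self._supported_config_types}

    def apply_config(self):
        return self._config.apply(self._instances, self._services, configs=self._configs)

    def process_events(self):
        pass
```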

View File

@@ -1,123 +1,138 @@
import traceback
from os import getenv
from docker import DockerClient
from glob import glob
from os.path import basename
from re import search
from traceback import format_exc
from Controller import Controller
from ConfigCaller import ConfigCaller
from logger import log
from logger import setup_logger
class DockerController(Controller, ConfigCaller) :
def __init__(self, docker_host) :
class DockerController(Controller, ConfigCaller):
def __init__(self, docker_host):
super().__init__("docker")
ConfigCaller.__init__(self)
self.__client = DockerClient(base_url=docker_host)
self.__logger = setup_logger("docker-controller", getenv("LOG_LEVEL", "INFO"))
def _get_controller_instances(self) :
return self.__client.containers.list(filters={"label" : "bunkerweb.AUTOCONF"})
def _to_instances(self, controller_instance) :
def _get_controller_instances(self):
return self.__client.containers.list(filters={"label": "bunkerweb.INSTANCE"})
def _to_instances(self, controller_instance):
instance = {}
instance["name"] = controller_instance.name
instance["hostname"] = controller_instance.name
instance["health"] = controller_instance.status == "running" and controller_instance.attrs["State"]["Health"]["Status"] == "healthy"
instance["health"] = (
controller_instance.status == "running"
and controller_instance.attrs["State"]["Health"]["Status"] == "healthy"
)
instance["env"] = {}
for env in controller_instance.attrs["Config"]["Env"] :
for env in controller_instance.attrs["Config"]["Env"]:
variable = env.split("=")[0]
value = env.replace(variable + "=", "", 1)
if self._is_setting(variable) :
value = env.replace(f"{variable}=", "", 1)
if self._is_setting(variable):
instance["env"][variable] = value
return [instance]
def _get_controller_services(self) :
return self.__client.containers.list(filters={"label" : "bunkerweb.SERVER_NAME"})
def _to_services(self, controller_service) :
def _get_controller_services(self):
return self.__client.containers.list(filters={"label": "bunkerweb.SERVER_NAME"})
def _to_services(self, controller_service):
service = {}
for variable, value in controller_service.labels.items() :
if not variable.startswith("bunkerweb.") :
for variable, value in controller_service.labels.items():
if not variable.startswith("bunkerweb."):
continue
real_variable = variable.replace("bunkerweb.", "", 1)
if not self._is_multisite_setting(real_variable) :
if not self._is_multisite_setting(real_variable):
continue
service[real_variable] = value
return [service]
def _get_static_services(self) :
def _get_static_services(self):
services = []
variables = {}
for instance in self.__client.containers.list(filters={"label" : "bunkerweb.AUTOCONF"}) :
for env in instance.attrs["Config"]["Env"] :
for instance in self.__client.containers.list(
filters={"label": "bunkerweb.INSTANCE"}
):
for env in instance.attrs["Config"]["Env"]:
variable = env.split("=")[0]
value = env.replace(variable + "=", "", 1)
value = env.replace(f"{variable}=", "", 1)
variables[variable] = value
server_names = []
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "" :
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
server_names = variables["SERVER_NAME"].split(" ")
for server_name in server_names :
for server_name in server_names:
service = {}
service["SERVER_NAME"] = server_name
for variable, value in variables.items() :
for variable, value in variables.items():
prefix = variable.split("_")[0]
real_variable = variable.replace(prefix + "_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable) :
real_variable = variable.replace(f"{prefix}_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable):
service[real_variable] = value
services.append(service)
return services
def get_configs(self) :
def get_configs(self):
configs = {}
for config_type in self._supported_config_types :
for config_type in self._supported_config_types:
configs[config_type] = {}
# get site configs from labels
for container in self.__client.containers.list(filters={"label" : "bunkerweb.SERVER_NAME"}) :
for container in self.__client.containers.list(
filters={"label": "bunkerweb.SERVER_NAME"}
):
# extract server_name
server_name = ""
for variable, value in container.labels.items() :
if not variable.startswith("bunkerweb.") :
for variable, value in container.labels.items():
if not variable.startswith("bunkerweb."):
continue
real_variable = variable.replace("bunkerweb.", "", 1)
if real_variable == "SERVER_NAME" :
if real_variable == "SERVER_NAME":
server_name = value.split(" ")[0]
break
# extract configs
if server_name == "" :
if server_name == "":
continue
for variable, value in container.labels.items() :
if not variable.startswith("bunkerweb.") :
for variable, value in container.labels.items():
if not variable.startswith("bunkerweb."):
continue
real_variable = variable.replace("bunkerweb.", "", 1)
result = search(r"^CUSTOM_CONF_(SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$", real_variable)
if result is None :
result = search(
r"^CUSTOM_CONF_(SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$", real_variable
)
if result is None:
continue
cfg_type = result.group(1).lower().replace("_", "-")
cfg_name = result.group(2)
configs[cfg_type][server_name + "/" + cfg_name] = value
configs[cfg_type][f"{server_name}/{cfg_name}"] = value
return configs
def apply_config(self) :
self._config.stop_scheduler()
def apply_config(self):
ret = self._config.apply(self._instances, self._services, configs=self._configs)
self._config.start_scheduler()
return ret
def process_events(self) :
for event in self.__client.events(decode=True, filters={"type": "container"}) :
def process_events(self):
for event in self.__client.events(decode=True, filters={"type": "container"}):
self._instances = self.get_instances()
self._services = self.get_services()
self._configs = self.get_configs()
if not self._config.update_needed(self._instances, self._services, configs=self._configs) :
if not self._config.update_needed(
self._instances, self._services, configs=self._configs
):
continue
log("DOCKER-CONTROLLER", "", "Catched docker event, deploying new configuration ...")
try :
self.__logger.info(
"Catched docker event, deploying new configuration ...",
)
try:
ret = self.apply_config()
if not ret :
log("DOCKER-CONTROLLER", "", "Error while deploying new configuration")
else :
log("DOCKER-CONTROLLER", "", "Successfully deployed new configuration 🚀")
except :
log("DOCKER-CONTROLLER", "", "Exception while deploying new configuration :")
print(traceback.format_exc())
if not ret:
self.__logger.error(
"Error while deploying new configuration",
)
else:
self.__logger.info(
"Successfully deployed new configuration 🚀",
)
except:
self.__logger.error(
f"Exception while deploying new configuration :\n{format_exc()}",
)
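A standalone sketch of the discovery change above: autoconf now matches BunkerWeb instances on the bunkerweb.INSTANCE label (formerly bunkerweb.AUTOCONF). The socket path is the Docker default:

```python
# Sketch: list BunkerWeb instances via the new bunkerweb.INSTANCE label.
from docker import DockerClient

client = DockerClient(base_url="unix:///var/run/docker.sock")
for container in client.containers.list(filters={"label": "bunkerweb.INSTANCE"}):
    print(container.name, container.status)
```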

View File

@@ -1,51 +1,47 @@
FROM python:3-alpine
# Copy python requirements
COPY bw/deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
# Install dependencies
COPY deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
RUN apk add --no-cache --virtual build gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
apk del build
RUN apk add --no-cache --virtual build g++ gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
apk del build
# Copy files
# can't exclude specific files/dir from . so we are copying everything by hand
COPY api /opt/bunkerweb/api
COPY cli /opt/bunkerweb/cli
COPY confs /opt/bunkerweb/confs
COPY core /opt/bunkerweb/core
COPY gen /opt/bunkerweb/gen
COPY helpers /opt/bunkerweb/helpers
COPY job /opt/bunkerweb/job
COPY utils /opt/bunkerweb/utils
COPY settings.json /opt/bunkerweb/settings.json
COPY VERSION /opt/bunkerweb/VERSION
COPY autoconf /opt/bunkerweb/autoconf
COPY bw/api /opt/bunkerweb/api
COPY bw/cli /opt/bunkerweb/cli
COPY bw/confs /opt/bunkerweb/confs
COPY bw/core /opt/bunkerweb/core
COPY bw/gen /opt/bunkerweb/gen
COPY bw/helpers /opt/bunkerweb/helpers
COPY bw/settings.json /opt/bunkerweb/settings.json
COPY db /opt/bunkerweb/db
COPY utils /opt/bunkerweb/utils
COPY VERSION /opt/bunkerweb/VERSION
# Add nginx user, drop bwcli, setup data folders, permissions and logging
RUN apk add --no-cache git && \
RUN apk add --no-cache bash git && \
ln -s /usr/local/bin/python3 /usr/bin/python3 && \
addgroup -g 101 nginx && \
adduser -h /var/cache/nginx -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx && \
apk add --no-cache bash && \
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin && \
mkdir /opt/bunkerweb/configs && \
for dir in $(echo "cache configs configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs letsencrypt plugins www") ; do ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
for dir in $(echo "cache configs configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs cache/letsencrypt plugins www") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
mkdir /opt/bunkerweb/tmp && \
chown -R root:nginx /opt/bunkerweb && \
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
chown -R nginx:nginx /data && \
chmod 770 /opt/bunkerweb/tmp && \
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/job/main.py /opt/bunkerweb/cli/main.py /usr/local/bin/bwcli /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/autoconf/main.py /opt/bunkerweb/deps/python/bin/* && \
find /opt/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/cli/main.py /usr/local/bin/bwcli /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/autoconf/main.py /opt/bunkerweb/deps/python/bin/* && \
chown root:nginx /usr/local/bin/bwcli && \
mkdir /etc/nginx && \
chown -R nginx:nginx /etc/nginx && \
chmod -R 770 /etc/nginx && \
ln -s /data/letsencrypt /etc/letsencrypt && \
mkdir /var/log/letsencrypt /var/lib/letsencrypt && \
chown root:nginx /var/log/letsencrypt /var/lib/letsencrypt && \
chmod 770 /var/log/letsencrypt /var/lib/letsencrypt && \
ln -s /proc/1/fd/1 /var/log/letsencrypt/letsencrypt.log
chmod -R 770 /etc/nginx
# Fix CVEs
RUN apk add "libssl1.1>=1.1.1q-r0" "libcrypto1.1>=1.1.1q-r0" "git>=2.32.3-r0" "ncurses-libs>=6.2_p20210612-r1" "ncurses-terminfo-base>=6.2_p20210612-r1" "libtirpc>=1.3.2-r1" "libtirpc-conf>=1.3.2-r1" "zlib>=1.2.12-r2" "libxml2>=2.9.14-r1"
@@ -54,4 +50,6 @@ VOLUME /data /etc/nginx
WORKDIR /opt/bunkerweb/autoconf
USER root:nginx
CMD ["python", "/opt/bunkerweb/autoconf/main.py"]

View File

@@ -1,224 +1,285 @@
from os import getenv
from traceback import format_exc
from kubernetes import client, config, watch
from kubernetes.client.exceptions import ApiException
from threading import Thread, Lock
from logger import log
from sys import exit
from sys import exit as sys_exit
from Controller import Controller
from ConfigCaller import ConfigCaller
from logger import setup_logger
class IngressController(Controller, ConfigCaller) :
def __init__(self) :
class IngressController(Controller, ConfigCaller):
def __init__(self):
Controller.__init__(self, "kubernetes")
ConfigCaller.__init__(self)
config.load_incluster_config()
self.__corev1 = client.CoreV1Api()
self.__networkingv1 = client.NetworkingV1Api()
self.__internal_lock = Lock()
self.__logger = setup_logger("Ingress-controller", getenv("LOG_LEVEL", "INFO"))
def _get_controller_instances(self) :
def _get_controller_instances(self):
controller_instances = []
for pod in self.__corev1.list_pod_for_all_namespaces(watch=False).items :
if pod.metadata.annotations != None and "bunkerweb.io/AUTOCONF" in pod.metadata.annotations :
for pod in self.__corev1.list_pod_for_all_namespaces(watch=False).items:
if (
pod.metadata.annotations != None
and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
):
controller_instances.append(pod)
return controller_instances
def _to_instances(self, controller_instance) :
def _to_instances(self, controller_instance):
instance = {}
instance["name"] = controller_instance.metadata.name
instance["hostname"] = controller_instance.status.pod_ip
health = False
if controller_instance.status.conditions is not None :
for condition in controller_instance.status.conditions :
if condition.type == "Ready" and condition.status == "True" :
if controller_instance.status.conditions is not None:
for condition in controller_instance.status.conditions:
if condition.type == "Ready" and condition.status == "True":
health = True
break
instance["health"] = health
instance["env"] = {}
for env in controller_instance.spec.containers[0].env :
if env.value is not None :
for env in controller_instance.spec.containers[0].env:
if env.value is not None:
instance["env"][env.name] = env.value
else :
else:
instance["env"][env.name] = ""
for controller_service in self._get_controller_services() :
if controller_service.metadata.annotations is not None :
for annotation, value in controller_service.metadata.annotations.items() :
if not annotation.startswith("bunkerweb.io/") :
for controller_service in self._get_controller_services():
if controller_service.metadata.annotations is not None:
for (
annotation,
value,
) in controller_service.metadata.annotations.items():
if not annotation.startswith("bunkerweb.io/"):
continue
variable = annotation.replace("bunkerweb.io/", "", 1)
if self._is_setting(variable) :
if self._is_setting(variable):
instance["env"][variable] = value
return [instance]
def _get_controller_services(self) :
def _get_controller_services(self):
return self.__networkingv1.list_ingress_for_all_namespaces(watch=False).items
def _to_services(self, controller_service) :
if controller_service.spec is None or controller_service.spec.rules is None :
def _to_services(self, controller_service):
if controller_service.spec is None or controller_service.spec.rules is None:
return []
services = []
# parse rules
for rule in controller_service.spec.rules :
if rule.host is None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without host.")
for rule in controller_service.spec.rules:
if rule.host is None:
self.__logger.warning(
"Ignoring unsupported ingress rule without host.",
)
continue
service = {}
service["SERVER_NAME"] = rule.host
if rule.http is None :
if rule.http is None:
services.append(service)
continue
location = 1
for path in rule.http.paths :
if path.path is None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without path.")
for path in rule.http.paths:
if path.path is None:
self.__logger.warning(
"Ignoring unsupported ingress rule without path.",
)
continue
if path.backend.service is None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service.")
if path.backend.service is None:
self.__logger.warning(
"Ignoring unsupported ingress rule without backend service.",
)
continue
if path.backend.service.port is None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service port.")
if path.backend.service.port is None:
self.__logger.warning(
"Ignoring unsupported ingress rule without backend service port.",
)
continue
if path.backend.service.port.number is None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service port number.")
if path.backend.service.port.number is None:
self.__logger.warning(
"Ignoring unsupported ingress rule without backend service port number.",
)
continue
service_list = self.__corev1.list_service_for_all_namespaces(watch=False, field_selector="metadata.name=" + path.backend.service.name).items
if len(service_list) == 0 :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring ingress rule with service " + path.backend.service.name + " : service not found.")
service_list = self.__corev1.list_service_for_all_namespaces(
watch=False,
field_selector=f"metadata.name={path.backend.service.name}",
).items
if len(service_list) == 0:
self.__logger.warning(
f"Ignoring ingress rule with service {path.backend.service.name} : service not found.",
)
continue
reverse_proxy_host = "http://" + path.backend.service.name + "." + service_list[0].metadata.namespace + ".svc.cluster.local:" + str(path.backend.service.port.number)
reverse_proxy_host = f"http://{path.backend.service.name}.{service_list[0].metadata.namespace}.svc.cluster.local:{path.backend.service.port.number}"
service["USE_REVERSE_PROXY"] = "yes"
service["REVERSE_PROXY_HOST_" + str(location)] = reverse_proxy_host
service["REVERSE_PROXY_URL_" + str(location)] = path.path
service[f"REVERSE_PROXY_HOST_{location}"] = reverse_proxy_host
service[f"REVERSE_PROXY_URL_{location}"] = path.path
location += 1
services.append(service)
# parse tls
if controller_service.spec.tls is not None :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported tls.")
if controller_service.spec.tls is not None:
self.__logger.warning("Ignoring unsupported tls.")
# parse annotations
if controller_service.metadata.annotations is not None :
for service in services :
for annotation, value in controller_service.metadata.annotations.items() :
if not annotation.startswith("bunkerweb.io/") :
if controller_service.metadata.annotations is not None:
for service in services:
for (
annotation,
value,
) in controller_service.metadata.annotations.items():
if not annotation.startswith("bunkerweb.io/"):
continue
variable = annotation.replace("bunkerweb.io/", "", 1)
if not variable.startswith(service["SERVER_NAME"].split(" ")[0] + "_") :
if not variable.startswith(
f"{service['SERVER_NAME'].split(' ')[0]}_"
):
continue
variable = variable.replace(service["SERVER_NAME"].split(" ")[0] + "_", "", 1)
if self._is_multisite_setting(variable) :
variable = variable.replace(
f"{service['SERVER_NAME'].split(' ')[0]}_", "", 1
)
if self._is_multisite_setting(variable):
service[variable] = value
return services
def _get_static_services(self) :
def _get_static_services(self):
services = []
variables = {}
for instance in self.__corev1.list_pod_for_all_namespaces(watch=False).items :
if instance.metadata.annotations is None or not "bunkerweb.io/AUTOCONF" in instance.metadata.annotations :
for instance in self.__corev1.list_pod_for_all_namespaces(watch=False).items:
if (
instance.metadata.annotations is None
or not "bunkerweb.io/INSTANCE" in instance.metadata.annotations
):
continue
for env in instance.spec.containers[0].env :
if env.value is None :
for env in instance.spec.containers[0].env:
if env.value is None:
variables[env.name] = ""
else :
else:
variables[env.name] = env.value
server_names = []
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "" :
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
server_names = variables["SERVER_NAME"].split(" ")
for server_name in server_names :
for server_name in server_names:
service = {}
service["SERVER_NAME"] = server_name
for variable, value in variables.items() :
for variable, value in variables.items():
prefix = variable.split("_")[0]
real_variable = variable.replace(prefix + "_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable) :
real_variable = variable.replace(f"{prefix}_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable):
service[real_variable] = value
services.append(service)
return services
def get_configs(self) :
def get_configs(self):
configs = {}
supported_config_types = ["http", "stream", "server-http", "server-stream", "default-server-http", "modsec", "modsec-crs"]
for config_type in supported_config_types :
supported_config_types = [
"http",
"stream",
"server-http",
"server-stream",
"default-server-http",
"modsec",
"modsec-crs",
]
for config_type in supported_config_types:
configs[config_type] = {}
for configmap in self.__corev1.list_config_map_for_all_namespaces(watch=False).items :
if configmap.metadata.annotations is None or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations :
for configmap in self.__corev1.list_config_map_for_all_namespaces(
watch=False
).items:
if (
configmap.metadata.annotations is None
or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations
):
continue
config_type = configmap.metadata.annotations["bunkerweb.io/CONFIG_TYPE"]
if config_type not in supported_config_types :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported CONFIG_TYPE " + config_type + " for ConfigMap " + configmap.metadata.name)
if config_type not in supported_config_types:
self.__logger.warning(
f"Ignoring unsupported CONFIG_TYPE {config_type} for ConfigMap {configmap.metadata.name}",
)
continue
if not configmap.data :
log("INGRESS-CONTROLLER", "⚠️", "Ignoring blank ConfigMap " + configmap.metadata.name)
if not configmap.data:
self.__logger.warning(
f"Ignoring blank ConfigMap {configmap.metadata.name}",
)
continue
config_site = ""
if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations :
config_site = configmap.metadata.annotations["bunkerweb.io/CONFIG_SITE"] + "/"
for config_name, config_data in configmap.data.items() :
configs[config_type][config_site + config_name] = config_data
if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations:
config_site = (
f"{configmap.metadata.annotations['bunkerweb.io/CONFIG_SITE']}/"
)
for config_name, config_data in configmap.data.items():
configs[config_type][f"{config_site}{config_name}"] = config_data
return configs
def __watch(self, watch_type) :
def __watch(self, watch_type):
w = watch.Watch()
what = None
if watch_type == "pod" :
if watch_type == "pod":
what = self.__corev1.list_pod_for_all_namespaces
elif watch_type == "ingress" :
elif watch_type == "ingress":
what = self.__networkingv1.list_ingress_for_all_namespaces
elif watch_type == "configmap" :
elif watch_type == "configmap":
what = self.__corev1.list_config_map_for_all_namespaces
else :
raise Exception("unsupported watch_type " + watch_type)
while True :
else:
raise Exception(f"unsupported watch_type {watch_type}")
while True:
locked = False
try :
for event in w.stream(what) :
try:
for event in w.stream(what):
self.__internal_lock.acquire()
locked = True
self._instances = self.get_instances()
self._services = self.get_services()
self._configs = self.get_configs()
if not self._config.update_needed(self._instances, self._services, configs=self._configs) :
if not self._config.update_needed(
self._instances, self._services, configs=self._configs
):
self.__internal_lock.release()
locked = False
continue
log("INGRESS-CONTROLLER", "", "Catched kubernetes event, deploying new configuration ...")
try :
self.__logger.info(
"Catched kubernetes event, deploying new configuration ...",
)
try:
ret = self.apply_config()
if not ret :
log("INGRESS-CONTROLLER", "", "Error while deploying new configuration ...")
else :
log("INGRESS-CONTROLLER", "", "Successfully deployed new configuration 🚀")
except :
log("INGRESS-CONTROLLER", "", "Exception while deploying new configuration :")
print(format_exc())
if not ret:
self.__logger.error(
"Error while deploying new configuration ...",
)
else:
self.__logger.info(
"Successfully deployed new configuration 🚀",
)
except:
self.__logger.error(
f"Exception while deploying new configuration :\n{format_exc()}",
)
self.__internal_lock.release()
locked = False
except ApiException as e :
if e.status != 410 :
log("INGRESS-CONTROLLER", "", "Exception while reading k8s event (type = " + watch_type + ") : ")
print(format_exc())
exit(1)
if locked :
except ApiException as e:
if e.status != 410:
self.__logger.error(
f"Exception while reading k8s event (type = {watch_type}) :\n{format_exc()}",
)
sys_exit(1)
if locked:
self.__internal_lock.release()
except :
log("INGRESS-CONTROLLER", "", "Unknown exception while reading k8s event (type = " + watch_type + ") : ")
print(format_exc())
exit(2)
except:
self.__logger.error(
f"Unknown exception while reading k8s event (type = {watch_type}) :\n{format_exc()}",
)
sys_exit(2)
def apply_config(self) :
self._config.stop_scheduler()
def apply_config(self):
ret = self._config.apply(self._instances, self._services, configs=self._configs)
self._config.start_scheduler()
return ret
def process_events(self) :
def process_events(self):
watch_types = ["pod", "ingress", "configmap"]
threads = []
for watch_type in watch_types :
for watch_type in watch_types:
threads.append(Thread(target=self.__watch, args=(watch_type,)))
for thread in threads :
for thread in threads:
thread.start()
for thread in threads :
for thread in threads:
thread.join()
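The custom-config lookup above reduces to this sketch, assuming in-cluster credentials as in IngressController.__init__; it lists ConfigMaps annotated with bunkerweb.io/CONFIG_TYPE:

```python
# Sketch: enumerate BunkerWeb custom-config ConfigMaps cluster-wide.
from kubernetes import client, config

config.load_incluster_config()
corev1 = client.CoreV1Api()
for cm in corev1.list_config_map_for_all_namespaces(watch=False).items:
    annotations = cm.metadata.annotations or {}
    if "bunkerweb.io/CONFIG_TYPE" in annotations:
        print(cm.metadata.name, annotations["bunkerweb.io/CONFIG_TYPE"])
```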

View File

@@ -1,125 +1,148 @@
from os import getenv
from traceback import format_exc
from threading import Thread, Lock
from docker import DockerClient
from logger import log
from base64 import b64decode
from Controller import Controller
from ConfigCaller import ConfigCaller
from logger import setup_logger
class SwarmController(Controller, ConfigCaller) :
def __init__(self, docker_host) :
class SwarmController(Controller, ConfigCaller):
def __init__(self, docker_host):
super().__init__("swarm")
ConfigCaller.__init__(self)
self.__client = DockerClient(base_url=docker_host)
self.__internal_lock = Lock()
def _get_controller_instances(self) :
return self.__client.services.list(filters={"label" : "bunkerweb.AUTOCONF"})
def _to_instances(self, controller_instance) :
self.__logger = setup_logger("Swarm-controller", getenv("LOG_LEVEL", "INFO"))
def _get_controller_instances(self):
return self.__client.services.list(filters={"label": "bunkerweb.INSTANCE"})
def _to_instances(self, controller_instance):
instances = []
instance_env = {}
for env in controller_instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] :
for env in controller_instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"][
"Env"
]:
variable = env.split("=")[0]
value = env.replace(variable + "=", "", 1)
if self._is_setting(variable) :
value = env.replace(f"{variable}=", "", 1)
if self._is_setting(variable):
instance_env[variable] = value
for task in controller_instance.tasks() :
for task in controller_instance.tasks():
instance = {}
instance["name"] = task["ID"]
instance["hostname"] = controller_instance.name + "." + task["NodeID"] + "." + task["ID"]
instance[
"hostname"
] = f"{controller_instance.name}.{task['NodeID']}.{task['ID']}"
instance["health"] = task["Status"]["State"] == "running"
instance["env"] = instance_env
instances.append(instance)
return instances
def _get_controller_services(self) :
return self.__client.services.list(filters={"label" : "bunkerweb.SERVER_NAME"})
def _get_controller_services(self):
return self.__client.services.list(filters={"label": "bunkerweb.SERVER_NAME"})
def _to_services(self, controller_service) :
def _to_services(self, controller_service):
service = {}
for variable, value in controller_service.attrs["Spec"]["Labels"].items() :
if not variable.startswith("bunkerweb.") :
for variable, value in controller_service.attrs["Spec"]["Labels"].items():
if not variable.startswith("bunkerweb."):
continue
real_variable = variable.replace("bunkerweb.", "", 1)
if not self._is_multisite_setting(real_variable) :
if not self._is_multisite_setting(real_variable):
continue
service[real_variable] = value
return [service]
def _get_static_services(self) :
def _get_static_services(self):
services = []
variables = {}
for instance in self.__client.services.list(filters={"label" : "bunkerweb.AUTOCONF"}) :
for env in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] :
for instance in self.__client.services.list(
filters={"label": "bunkerweb.INSTANCE"}
):
for env in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]:
variable = env.split("=")[0]
value = env.replace(variable + "=", "", 1)
value = env.replace(f"{variable}=", "", 1)
variables[variable] = value
server_names = []
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "" :
if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
server_names = variables["SERVER_NAME"].split(" ")
for server_name in server_names :
for server_name in server_names:
service = {}
service["SERVER_NAME"] = server_name
for variable, value in variables.items() :
for variable, value in variables.items():
prefix = variable.split("_")[0]
real_variable = variable.replace(prefix + "_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable) :
real_variable = variable.replace(f"{prefix}_", "", 1)
if prefix == server_name and self._is_multisite_setting(real_variable):
service[real_variable] = value
services.append(service)
return services
def get_configs(self) :
def get_configs(self):
configs = {}
for config_type in self._supported_config_types :
for config_type in self._supported_config_types:
configs[config_type] = {}
for config in self.__client.configs.list(filters={"label" : "bunkerweb.CONFIG_TYPE"}) :
for config in self.__client.configs.list(
filters={"label": "bunkerweb.CONFIG_TYPE"}
):
config_type = config.attrs["Spec"]["Labels"]["bunkerweb.CONFIG_TYPE"]
config_name = config.name
if config_type not in self._supported_config_types :
log("SWARM-CONTROLLER", "⚠️", "Ignoring unsupported CONFIG_TYPE " + config_type + " for Config " + config_name)
if config_type not in self._supported_config_types:
self.__logger.warning(
f"Ignoring unsupported CONFIG_TYPE {config_type} for Config {config_name}",
)
continue
config_site = ""
if "bunkerweb.CONFIG_SITE" in config.attrs["Spec"]["Labels"] :
config_site = config.attrs["Spec"]["Labels"]["bunkerweb.CONFIG_SITE"] + "/"
configs[config_type][config_site + config_name] = b64decode(config.attrs["Spec"]["Data"])
if "bunkerweb.CONFIG_SITE" in config.attrs["Spec"]["Labels"]:
config_site = (
f"{config.attrs['Spec']['Labels']['bunkerweb.CONFIG_SITE']}/"
)
configs[config_type][f"{config_site}{config_name}"] = b64decode(
config.attrs["Spec"]["Data"]
)
return configs
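As a side note on get_configs() above: the Docker API returns Swarm config payloads base64-encoded in attrs["Spec"]["Data"], hence the b64decode. A standalone sketch using the default socket path:

```python
# Sketch: read BunkerWeb custom configs stored as Swarm configs.
from base64 import b64decode
from docker import DockerClient

client = DockerClient(base_url="unix:///var/run/docker.sock")
for cfg in client.configs.list(filters={"label": "bunkerweb.CONFIG_TYPE"}):
    labels = cfg.attrs["Spec"]["Labels"]
    data = b64decode(cfg.attrs["Spec"]["Data"])
    print(cfg.name, labels["bunkerweb.CONFIG_TYPE"], len(data))
```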
def apply_config(self) :
self._config.stop_scheduler()
def apply_config(self):
ret = self._config.apply(self._instances, self._services, configs=self._configs)
self._config.start_scheduler()
return ret
def __event(self, event_type) :
for event in self.__client.events(decode=True, filters={"type": event_type}) :
def __event(self, event_type):
for event in self.__client.events(decode=True, filters={"type": event_type}):
self.__internal_lock.acquire()
self._instances = self.get_instances()
self._services = self.get_services()
self._configs = self.get_configs()
if not self._config.update_needed(self._instances, self._services, configs=self._configs) :
if not self._config.update_needed(
self._instances, self._services, configs=self._configs
):
self.__internal_lock.release()
continue
log("SWARM-CONTROLLER", "", "Catched Swarm event, deploying new configuration ...")
try :
self.__logger.info(
"Catched Swarm event, deploying new configuration ...",
)
try:
ret = self.apply_config()
if not ret:
self.__logger.error(
"Error while deploying new configuration ...",
)
else:
self.__logger.info(
"Successfully deployed new configuration 🚀",
)
except:
self.__logger.error(
f"Exception while deploying new configuration :\n{format_exc()}",
)
self.__internal_lock.release()
def process_events(self):
event_types = ["service", "config"]
threads = []
for event_type in event_types:
threads.append(Thread(target=self.__event, args=(event_type,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
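For reference, a minimal runnable sketch of the SERVER_NAME prefix convention that _get_static_services relies on; the service names and the USE_ANTIBOT value below are made up for illustration, and the _is_multisite_setting filter is omitted:

# Made-up values illustrating the "<server>_<SETTING>" convention parsed above.
variables = {
    "SERVER_NAME": "app1.example.com app2.example.com",
    "app1.example.com_USE_ANTIBOT": "captcha",
}
for server_name in variables["SERVER_NAME"].split(" "):
    service = {"SERVER_NAME": server_name}
    for variable, value in variables.items():
        prefix = variable.split("_")[0]
        if prefix == server_name:
            # The _is_multisite_setting check from the controller is omitted here.
            service[variable.replace(f"{prefix}_", "", 1)] = value
    print(service)
# {'SERVER_NAME': 'app1.example.com', 'USE_ANTIBOT': 'captcha'}
# {'SERVER_NAME': 'app2.example.com'}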

View File

@@ -1,68 +1,128 @@
#!/usr/bin/python3
from os import _exit, environ, getenv
from signal import SIGINT, SIGTERM, signal
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from time import sleep
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
sys_path.append("/opt/bunkerweb/db")
from docker import DockerClient
from docker.errors import DockerException
from kubernetes import client as kube_client
from logger import setup_logger
from SwarmController import SwarmController
from IngressController import IngressController
from DockerController import DockerController
from Database import Database
# Get variables
logger = setup_logger("Autoconf", getenv("LOG_LEVEL", "INFO"))
swarm = getenv("SWARM_MODE", "no") == "yes"
kubernetes = getenv("KUBERNETES_MODE", "no") == "yes"
docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
def exit_handler(signum, frame):
logger.info("Stop signal received, exiting...")
_exit(0)
signal(SIGINT, exit_handler)
signal(SIGTERM, exit_handler)
try:
# Setup /data folder if needed
proc = run(
["/opt/bunkerweb/helpers/data.sh", "AUTOCONF"],
stdin=DEVNULL,
stderr=STDOUT,
)
if proc.returncode != 0:
_exit(1)
db = None
if "DATABASE_URI" in environ:
db = Database(logger)
elif kubernetes:
corev1 = kube_client.CoreV1Api()
for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
if (
pod.metadata.annotations != None
and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
):
for pod_env in pod.spec.containers[0].env:
if pod_env.name == "DATABASE_URI":
db = Database(
logger,
pod_env.value or getenv("DATABASE_URI", "5000"),
)
break
else:
try:
docker_client = DockerClient(base_url="tcp://docker-proxy:2375")
except DockerException:
docker_client = DockerClient(
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
)
apis = []
for instance in docker_client.containers.list(
filters={"label": "bunkerweb.INSTANCE"}
):
for var in instance.attrs["Config"]["Env"]:
if var.startswith("DATABASE_URI="):
db = Database(logger, var.replace("DATABASE_URI=", "", 1))
break
if db is None:
logger.error("No database found, exiting ...")
_exit(1)
while not db.is_initialized():
logger.warning(
"Database is not initialized, retrying in 5 seconds ...",
)
sleep(5)
# Instantiate the controller
if swarm:
logger.info("Swarm mode detected")
controller = SwarmController(docker_host)
elif kubernetes:
logger.info("Kubernetes mode detected")
controller = IngressController()
else:
logger.info("Docker mode detected")
controller = DockerController(docker_host)
# Wait for instances
log("AUTOCONF", "", "Waiting for BunkerWeb instances ...")
logger.info("Waiting for BunkerWeb instances ...")
instances = controller.wait(wait_retry_interval)
log("AUTOCONF", "", "BunkerWeb instances are ready 🚀")
logger.info("BunkerWeb instances are ready 🚀")
i = 1
for instance in instances:
logger.info(f"Instance #{i} : {instance['name']}")
i += 1
# Run first configuration
ret = controller.apply_config()
if not ret:
logger.error("Error while applying initial configuration")
_exit(1)
# Process events
log("AUTOCONF", "", "Processing events ...")
logger.info("Processing events ...")
controller.process_events()
except:
logger.error(f"Exception while running autoconf :\n{format_exc()}")
sys_exit(1)

bw/api/API.py Normal file
View File

@@ -0,0 +1,52 @@
from requests import request
class API:
def __init__(self, endpoint, host="bwapi"):
self.__endpoint = endpoint
self.__host = host
def get_endpoint(self):
return self.__endpoint
def get_host(self):
return self.__host
def request(self, method, url, data=None, files=None, timeout=(10, 30)):
try:
headers = {}
headers["User-Agent"] = "bwapi"
headers["Host"] = self.__host
if type(data) is dict:
resp = request(
method,
f"{self.__endpoint}{url}",
json=data,
timeout=timeout,
headers=headers,
)
elif type(data) is bytes:
resp = request(
method,
f"{self.__endpoint}{url}",
data=data,
timeout=timeout,
headers=headers,
)
elif files is not None:
resp = request(
method,
f"{self.__endpoint}{url}",
files=files,
timeout=timeout,
headers=headers,
)
elif data is None:
resp = request(
method, f"{self.__endpoint}{url}", timeout=timeout, headers=headers
)
else:
return False, "unsupported data type", None, None
except Exception as e:
return False, str(e), None, None
return True, "ok", resp.status_code, resp.json()

bw/cli/CLI.py Normal file
View File

@@ -0,0 +1,119 @@
from os.path import isfile
from dotenv import dotenv_values
from docker import DockerClient
from kubernetes import client, config
from ApiCaller import ApiCaller
from API import API
class CLI(ApiCaller):
def __init__(self):
self.__variables = dotenv_values("/etc/nginx/variables.env")
self.__integration = self.__detect_integration()
super().__init__(self.__get_apis())
def __detect_integration(self):
ret = "unknown"
distrib = ""
if isfile("/etc/os-release"):
with open("/etc/os-release", "r") as f:
if "Alpine" in f.read():
distrib = "alpine"
else:
distrib = "other"
# Docker case
if distrib == "alpine" and isfile("/usr/sbin/nginx"):
return "docker"
# Linux case
if distrib == "other":
return "linux"
# Swarm case
if self.__variables["SWARM_MODE"] == "yes":
return "swarm"
# Kubernetes case
if self.__variables["KUBERNETES_MODE"] == "yes":
return "kubernetes"
# Autoconf case
if distrib == "alpine":
return "autoconf"
raise Exception("can't detect integration")
def __get_apis(self):
# Docker case
if self.__integration == "docker" or self.__integration == "linux":
return [
API(
f"http://127.0.0.1:{self.__variables['API_HTTP_PORT']}",
host=self.__variables["API_SERVER_NAME"],
)
]
# Autoconf case
if self.__integration == "autoconf":
docker_client = DockerClient()
apis = []
for container in docker_client.containers.list(
filters={"label": "bunkerweb.INSTANCE"}
):
port = "5000"
host = "bwapi"
for env in container.attrs["Config"]["Env"]:
if env.startswith("API_HTTP_PORT="):
port = env.split("=")[1]
elif env.startswith("API_SERVER_NAME="):
host = env.split("=")[1]
apis.append(API(f"http://{container.name}:{port}", host=host))
return apis
# Swarm case
if self.__integration == "swarm":
docker_client = DockerClient()
apis = []
for service in docker_client.services.list(
filters={"label": "bunkerweb.INSTANCE"}
):
port = "5000"
host = "bwapi"
for env in service.attrs["Spec"]["TaskTemplate"]["ContainerSpec"][
"Env"
]:
if env.startswith("API_HTTP_PORT="):
port = env.split("=")[1]
elif env.startswith("API_SERVER_NAME="):
host = env.split("=")[1]
for task in service.tasks():
apis.append(
API(
f"http://{service.name}.{task['NodeID']}.{task['ID']}:{port}",
host=host,
)
)
return apis
# Kubernetes case
if self.__integration == "kubernetes":
config.load_incluster_config()
corev1 = client.CoreV1Api()
apis = []
for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
if (
pod.metadata.annotations != None
and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
and pod.status.pod_ip
):
port = "5000"
host = "bwapi"
for env in pod.spec.containers[0].env:
if env.name == "API_HTTP_PORT":
port = env.value
elif env.name == "API_SERVER_NAME":
host = env.value
apis.append(API(f"http://{pod.status.pod_ip}:{port}", host=host))
return apis
def unban(self, ip):
if self._send_to_apis("POST", "/unban", data={"ip": ip}):
return True, f"IP {ip} has been unbanned"
return False, "error"

bw/cli/main.py Normal file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from os import _exit
from sys import exit as sys_exit, path
from traceback import format_exc
path.append("/opt/bunkerweb/deps/python")
path.append("/opt/bunkerweb/cli")
path.append("/opt/bunkerweb/utils")
path.append("/opt/bunkerweb/api")
from logger import setup_logger
from CLI import CLI
if __name__ == "__main__":
logger = setup_logger("CLI", "INFO")
try:
# Global parser
parser = ArgumentParser(description="BunkerWeb Command Line Interface")
subparsers = parser.add_subparsers(help="command", dest="command")
# Unban subparser
parser_unban = subparsers.add_parser(
"unban", help="remove a ban from the cache"
)
parser_unban.add_argument("ip", type=str, help="IP address to unban")
# Parse args
args = parser.parse_args()
# Instantiate CLI
cli = CLI()
# Execute command
ret, err = False, "unknown command"
if args.command == "unban":
ret, err = cli.unban(args.ip)
if not ret:
logger.error(f"CLI command status : ❌ (fail)\n{err}")
_exit(1)
else:
logger.info(f"CLI command status : ✔️ (success)\n{err}")
_exit(0)
except SystemExit as se:
sys_exit(se.code)
except:
logger.error(f"Error while executing bwcli :\n{format_exc()}")
sys_exit(1)
sys_exit(0)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,98 @@
{
"id": "antibot",
"order": 8,
"name": "Antibot",
"description": "Bot detection by using a challenge.",
"version": "0.1",
"settings": {
"USE_ANTIBOT": {
"context": "multisite",
"default": "no",
"help": "Activate antibot feature.",
"id": "use-antibot",
"label": "Antibot challenge",
"regex": "^(no|cookie|javascript|captcha|recaptcha|hcaptcha)$",
"type": "select",
"select": [
"no",
"cookie",
"javascript",
"captcha",
"recaptcha",
"hcaptcha"
]
},
"ANTIBOT_URI": {
"context": "multisite",
"default": "/challenge",
"help": "Unused URI that clients will be redirected to to solve the challenge.",
"id": "antibot-uri",
"label": "Antibot URL",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_SESSION_SECRET": {
"context": "global",
"default": "random",
"help": "Secret used to encrypt sessions variables for storing data related to challenges.",
"id": "antibot-session-secret",
"label": "Session secret",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_SESSION_NAME": {
"context": "global",
"default": "random",
"help": "Name of the cookie used by the antibot feature.",
"id": "antibot-session-name",
"label": "Session name",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_RECAPTCHA_SCORE": {
"context": "multisite",
"default": "0.7",
"help": "Minimum score required for reCAPTCHA challenge.",
"id": "antibot-recaptcha-score",
"label": "reCAPTCHA score",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_RECAPTCHA_SITEKEY": {
"context": "multisite",
"default": "",
"help": "Sitekey for reCAPTCHA challenge.",
"id": "antibot-recaptcha-sitekey",
"label": "reCAPTCHA sitekey",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_RECAPTCHA_SECRET": {
"context": "multisite",
"default": "",
"help": "Secret for reCAPTCHA challenge.",
"id": "antibot-recaptcha-secret",
"label": "reCAPTCHA secret",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_HCAPTCHA_SITEKEY": {
"context": "multisite",
"default": "",
"help": "Sitekey for hCaptcha challenge.",
"id": "antibot-hcaptcha-sitekey",
"label": "hCaptcha sitekey",
"regex": "^.*$",
"type": "text"
},
"ANTIBOT_HCAPTCHA_SECRET": {
"context": "multisite",
"default": "",
"help": "Secret for hCaptcha challenge.",
"id": "antibot-hcaptcha-secret",
"label": "hCaptcha secret",
"regex": "^.*$",
"type": "text"
}
}
}
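As a made-up example of how such multisite settings are resolved at job time: with MULTISITE=yes, a server-prefixed variable overrides the global one, which is the getenv fallback pattern used by the jobs in this commit:

from os import environ, getenv

# Made-up values showing the "<server>_<SETTING>" override convention.
environ["USE_ANTIBOT"] = "no"                          # global default
environ["www.example.com_USE_ANTIBOT"] = "javascript"  # per-server override
first_server = "www.example.com"
print(getenv(f"{first_server}_USE_ANTIBOT", getenv("USE_ANTIBOT")))  # javascript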

View File

@@ -0,0 +1,54 @@
{
"id": "authbasic",
"order": 999,
"name": "Auth basic",
"description": "Enforce login before accessing a resource or the whole site using HTTP basic auth method.",
"version": "0.1",
"settings": {
"USE_AUTH_BASIC": {
"context": "multisite",
"default": "no",
"help": "Use HTTP basic auth",
"id": "use-auth-basic",
"label": "Use HTTP basic auth",
"regex": "^(yes|no)$",
"type": "check"
},
"AUTH_BASIC_LOCATION": {
"context": "multisite",
"default": "sitewide",
"help": "URL of the protected resource or sitewide value.",
"id": "auth-basic-location",
"label": "Location",
"regex": "^.*$",
"type": "text"
},
"AUTH_BASIC_USER": {
"context": "multisite",
"default": "changeme",
"help": "Username",
"id": "auth-basic-user",
"label": "Username",
"regex": "^.*$",
"type": "text"
},
"AUTH_BASIC_PASSWORD": {
"context": "multisite",
"default": "changeme",
"help": "Password",
"id": "auth-basic-password",
"label": "Password",
"regex": "^.*$",
"type": "text"
},
"AUTH_BASIC_TEXT": {
"context": "multisite",
"default": "Restricted area",
"help": "Text to display",
"id": "auth-basic-text",
"label": "Text",
"regex": "^.*$",
"type": "text"
}
}
}

View File

@@ -0,0 +1,54 @@
{
"id": "badbehavior",
"order": 999,
"name": "Bad behavior",
"description": "Ban IP generating too much 'bad' HTTP status code in a period of time.",
"version": "0.1",
"settings": {
"USE_BAD_BEHAVIOR": {
"context": "multisite",
"default": "yes",
"help": "Activate Bad behavior feature.",
"id": "use-bad-behavior",
"label": "Activate bad behavior",
"regex": "^(yes|no)$",
"type": "check"
},
"BAD_BEHAVIOR_STATUS_CODES": {
"context": "multisite",
"default": "400 401 403 404 405 429 444",
"help": "List of HTTP status codes considered as 'bad'.",
"id": "bad-behavior-status-code",
"label": "Bad status codes",
"regex": "^.*$",
"type": "text"
},
"BAD_BEHAVIOR_BAN_TIME": {
"context": "multisite",
"default": "86400",
"help": "The duration time (in seconds) of a ban when the corresponding IP has reached the threshold.",
"id": "bad-behavior-ban-time",
"label": "Ban duration (in seconds)",
"regex": "^.*$",
"type": "text"
},
"BAD_BEHAVIOR_THRESHOLD": {
"context": "multisite",
"default": "10",
"help": "Maximum number of 'bad' HTTP status codes within the period of time before IP is banned.",
"id": "bad-behavior-threshold",
"label": "Threshold",
"regex": "^.*$",
"type": "text"
},
"BAD_BEHAVIOR_COUNT_TIME": {
"context": "multisite",
"default": "60",
"help": "Period of time during which we count 'bad' HTTP status codes.",
"id": "bad-behavior-period",
"label": "Period (in seconds)",
"regex": "^.*$",
"type": "text"
}
}
}

View File

@@ -0,0 +1,182 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from re import match
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from requests import get
from logger import setup_logger
from jobs import cache_file, cache_hash, is_cached_file, file_hash
def check_line(kind, line):
if kind == "IP":
if "/" in line:
try:
ip_network(line)
return True, line
except ValueError:
pass
else:
try:
ip_address(line)
return True, line
except ValueError:
pass
return False, ""
elif kind == "RDNS":
if match(r"^(\.?[A-Za-z0-9\-]+)*\.[A-Za-z]{2,}$", line):
return True, line.lower()
return False, ""
elif kind == "ASN":
real_line = line.replace("AS", "")
if match(r"^\d+$", real_line):
return True, real_line
elif kind == "USER_AGENT":
return True, line.replace("\\ ", " ").replace("\\.", "%.").replace(
"\\\\", "\\"
).replace("-", "%-")
elif kind == "URI":
if match(r"^/", line):
return True, line
return False, ""
logger = setup_logger("BLACKLIST", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Check if at least a server has Blacklist activated
blacklist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(first_server + "_USE_BLACKLIST", getenv("USE_BLACKLIST"))
== "yes"
):
blacklist_activated = True
break
# Singlesite case
elif getenv("USE_BLACKLIST") == "yes":
blacklist_activated = True
if not blacklist_activated:
logger.info("Blacklist is not activated, skipping downloads...")
_exit(0)
# Create directories if they don't exist
makedirs("/opt/bunkerweb/cache/blacklist", exist_ok=True)
makedirs("/opt/bunkerweb/tmp/blacklist", exist_ok=True)
# Our urls data
urls = {"IP": [], "RDNS": [], "ASN": [], "USER_AGENT": [], "URI": []}
# Don't go further if the cache is fresh
kinds_fresh = {
"IP": True,
"RDNS": True,
"ASN": True,
"USER_AGENT": True,
"URI": True,
"IGNORE_IP": True,
"IGNORE_RDNS": True,
"IGNORE_ASN": True,
"IGNORE_USER_AGENT": True,
"IGNORE_URI": True,
}
all_fresh = True
for kind in kinds_fresh:
if not is_cached_file(f"/opt/bunkerweb/cache/blacklist/{kind}.list", "hour"):
kinds_fresh[kind] = False
all_fresh = False
logger.info(
f"Blacklist for {kind} is not cached, processing downloads..",
)
else:
logger.info(
f"Blacklist for {kind} is already in cache, skipping downloads...",
)
if all_fresh:
_exit(0)
# Get URLs
urls = {
"IP": [],
"RDNS": [],
"ASN": [],
"USER_AGENT": [],
"URI": [],
"IGNORE_IP": [],
"IGNORE_RDNS": [],
"IGNORE_ASN": [],
"IGNORE_USER_AGENT": [],
"IGNORE_URI": [],
}
for kind in urls:
for url in getenv(f"BLACKLIST_{kind}_URLS", "").split(" "):
if url != "" and url not in urls[kind]:
urls[kind].append(url)
# Loop on kinds
for kind, urls_list in urls.items():
if kinds_fresh[kind]:
continue
# Write combined data of the kind to a single temp file
for url in urls_list:
try:
logger.info(f"Downloading blacklist data from {url} ...")
resp = get(url, stream=True)
if resp.status_code != 200:
continue
i = 0
with open(f"/opt/bunkerweb/tmp/blacklist/{kind}.list", "w") as f:
for line in resp.iter_lines(decode_unicode=True):
line = line.strip()
if kind != "USER_AGENT":
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
ok, data = check_line(kind, line)
if ok:
f.write(data + "\n")
i += 1
logger.info(f"Downloaded {i} bad {kind}")
# Check if file has changed
new_hash = file_hash(f"/opt/bunkerweb/tmp/blacklist/{kind}.list")
old_hash = cache_hash(f"/opt/bunkerweb/cache/blacklist/{kind}.list")
if new_hash == old_hash:
logger.info(
f"New file {kind}.list is identical to cache file, reload is not needed",
)
else:
logger.info(
f"New file {kind}.list is different than cache file, reload is needed",
)
# Put file in cache
cached, err = cache_file(
f"/opt/bunkerweb/tmp/blacklist/{kind}.list",
f"/opt/bunkerweb/cache/blacklist/{kind}.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching blacklist : {err}")
status = 2
if status != 2:
status = 1
except:
status = 2
logger.error(
f"Exception while getting blacklist from {url} :\n{format_exc()}"
)
except:
status = 2
logger.error(f"Exception while running blacklist-download.py :\n{format_exc()}")
sys_exit(status)
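To make the normalisation done by check_line above concrete, a few illustrative calls (all inputs are made up; the % escapes on User-Agents are presumably for Lua pattern matching downstream):

# Illustrative calls (made-up inputs):
print(check_line("IP", "10.0.0.0/8"))         # (True, '10.0.0.0/8')
print(check_line("IP", "not-an-ip"))          # (False, '')
print(check_line("ASN", "AS1234"))            # (True, '1234')
print(check_line("RDNS", ".Shodan.IO"))       # (True, '.shodan.io')
print(check_line("USER_AGENT", "Bad-Bot"))    # (True, 'Bad%-Bot')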

View File

@@ -0,0 +1,215 @@
{
"id": "blacklist",
"order": 2,
"name": "Blacklist",
"description": "Deny access based on internal and external IP/network/rDNS/ASN blacklists.",
"version": "0.1",
"settings": {
"USE_BLACKLIST": {
"context": "multisite",
"default": "yes",
"help": "Activate blacklist feature.",
"id": "use-blacklist",
"label": "Activate blacklisting",
"regex": "^(yes|no)$",
"type": "check"
},
"BLACKLIST_IP_URLS": {
"context": "global",
"default": "https://www.dan.me.uk/torlist/?exit",
"help": "List of URLs, separated with spaces, containing bad IP/network to block.",
"id": "blacklist-ip-urls",
"label": "Blacklist IP/network URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IP": {
"context": "multisite",
"default": "",
"help": "List of IP/network, separated with spaces, to block.",
"id": "blacklist-ip",
"label": "Blacklist IP/network",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_RDNS": {
"context": "multisite",
"default": ".shodan.io .censys.io",
"help": "List of reverse DNS suffixes, separated with spaces, to block.",
"id": "blacklist-rdns",
"label": "Blacklist reverse DNS",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_RDNS_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing reverse DNS suffixes to block.",
"id": "blacklist-rdns-urls",
"label": "Blacklist reverse DNS URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_RDNS_GLOBAL": {
"context": "multisite",
"default": "yes",
"help": "Only perform RDNS blacklist checks on global IP addresses.",
"id": "blacklist-rdns-global",
"label": "Blacklist reverse DNS global IPs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_ASN": {
"context": "multisite",
"default": "",
"help": "List of ASN numbers, separated with spaces, to block.",
"id": "blacklist-asn",
"label": "Blacklist ASN",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_ASN_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing ASN to block.",
"id": "blacklist-rdns-urls",
"label": "Blacklist ASN URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_USER_AGENT": {
"context": "multisite",
"default": "",
"help": "List of User-Agent, separated with spaces, to block.",
"id": "blacklist-user-agent",
"label": "Blacklist User-Agent",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_USER_AGENT_URLS": {
"context": "global",
"default": "https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list",
"help": "List of URLs, separated with spaces, containing bad User-Agent to block.",
"id": "blacklist-user-agent-urls",
"label": "Blacklist User-Agent URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_URI": {
"context": "multisite",
"default": "",
"help": "List of URI, separated with spaces, to block.",
"id": "blacklist-uri",
"label": "Blacklist URI",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_URI_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing bad URI to block.",
"id": "blacklist-uri-urls",
"label": "Blacklist URI URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_IP_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing IP/network to ignore in the blacklist.",
"id": "blacklist-ip-urls",
"label": "Blacklist IP/network URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_IP": {
"context": "multisite",
"default": "",
"help": "List of IP/network, separated with spaces, to ignore in the blacklist.",
"id": "blacklist-ip",
"label": "Blacklist IP/network",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_RDNS": {
"context": "multisite",
"default": "",
"help": "List of reverse DNS suffixes, separated with spaces, to ignore in the blacklist.",
"id": "blacklist-rdns",
"label": "Blacklist reverse DNS",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_RDNS_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing reverse DNS suffixes to ignore in the blacklist.",
"id": "blacklist-rdns-urls",
"label": "Blacklist reverse DNS URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_ASN": {
"context": "multisite",
"default": "",
"help": "List of ASN numbers, separated with spaces, to ignore in the blacklist.",
"id": "blacklist-asn",
"label": "Blacklist ASN",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_ASN_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing ASN to ignore in the blacklist.",
"id": "blacklist-rdns-urls",
"label": "Blacklist ASN URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_USER_AGENT": {
"context": "multisite",
"default": "",
"help": "List of User-Agent, separated with spaces, to ignore in the blacklist.",
"id": "blacklist-user-agent",
"label": "Blacklist User-Agent",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_USER_AGENT_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing User-Agent to ignore in the blacklist.",
"id": "blacklist-user-agent-urls",
"label": "Blacklist User-Agent URLs",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_URI": {
"context": "multisite",
"default": "",
"help": "List of URI, separated with spaces, to ignore in the blacklist.",
"id": "blacklist-uri",
"label": "Blacklist URI",
"regex": "^.*$",
"type": "text"
},
"BLACKLIST_IGNORE_URI_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing URI to ignore in the blacklist.",
"id": "blacklist-uri-urls",
"label": "Blacklist URI URLs",
"regex": "^.*$",
"type": "text"
}
},
"jobs": [
{
"name": "blacklist-download",
"file": "blacklist-download.py",
"every": "hour",
"reload": true
}
]
}

View File

@@ -0,0 +1,46 @@
{
"id": "brotli",
"order": 999,
"name": "Brotli",
"description": "Compress HTTP requests with the brotli algorithm.",
"version": "0.1",
"settings": {
"USE_BROTLI": {
"context": "multisite",
"default": "no",
"help": "Use brotli",
"id": "use-brotli",
"label": "Use brotli",
"regex": "^(yes|no)$",
"type": "check"
},
"BROTLI_TYPES": {
"context": "multisite",
"default": "application/atom+xml application/javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-opentype application/x-font-truetype application/x-font-ttf application/x-javascript application/xhtml+xml application/xml font/eot font/opentype font/otf font/truetype image/svg+xml image/vnd.microsoft.icon image/x-icon image/x-win-bitmap text/css text/javascript text/plain text/xml",
"help": "List of MIME types that will be compressed with brotli.",
"id": "brotli-types",
"label": "MIME types",
"regex": "^.*$",
"type": "text"
},
"BROTLI_MIN_LENGTH": {
"context": "multisite",
"default": "1000",
"help": "Minimum length for brotli compression.",
"id": "brotli-min-length",
"label": "Minimum length",
"regex": "^.*$",
"type": "text"
},
"BROTLI_COMP_LEVEL": {
"context": "multisite",
"default": "6",
"help": "The compression level of the brotli algorithm.",
"id": "brotli-comp-level",
"label": "Compression level",
"regex": "^([1-9]|10|11)$",
"type": "select",
"select": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"]
}
}
}

View File

@@ -0,0 +1,108 @@
#!/usr/bin/python3
from os import _exit, getenv, makedirs
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/core/bunkernet/jobs")
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file
from bunkernet import data
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Check if at least a server has BunkerNet activated
bunkernet_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(f"{first_server}_USE_BUNKERNET", getenv("USE_BUNKERNET"))
== "yes"
):
bunkernet_activated = True
break
# Singlesite case
elif getenv("USE_BUNKERNET") == "yes":
bunkernet_activated = True
if not bunkernet_activated:
logger.info("BunkerNet is not activated, skipping download...")
_exit(0)
# Create directory if it doesn't exist
makedirs("/opt/bunkerweb/cache/bunkernet", exist_ok=True)
# Check if ID is present
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
logger.error(
"Not downloading BunkerNet data because instance is not registered",
)
_exit(2)
# Don't go further if the cache is fresh
if is_cached_file("/opt/bunkerweb/cache/bunkernet/ip.list", "day"):
logger.info(
"BunkerNet list is already in cache, skipping download...",
)
_exit(0)
# Download data
logger.info("Downloading BunkerNet data ...")
ok, status, data = data()
if not ok:
logger.error(
f"Error while sending data request to BunkerNet API : {data}",
)
_exit(2)
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
_exit(0)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending db request : {data['data']}, removing instance ID",
)
_exit(2)
logger.info("Successfully downloaded data from BunkerNet API")
# Writing data to file
logger.info("Saving BunkerNet data ...")
with open("/opt/bunkerweb/tmp/bunkernet-ip.list", "w") as f:
for ip in data["data"]:
f.write(f"{ip}\n")
# Check if file has changed
new_hash = file_hash("/opt/bunkerweb/tmp/bunkernet-ip.list")
old_hash = cache_hash("/opt/bunkerweb/cache/bunkernet/ip.list")
if new_hash == old_hash:
logger.info(
"New file is identical to cache file, reload is not needed",
)
_exit(0)
# Put file in cache
cached, err = cache_file(
"/opt/bunkerweb/tmp/bunkernet-ip.list",
"/opt/bunkerweb/cache/bunkernet/ip.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching BunkerNet data : {err}")
_exit(2)
logger.info("Successfully saved BunkerNet data")
status = 1
except:
status = 2
logger.error(f"Exception while running bunkernet-data.py :\n{format_exc()}")
sys_exit(status)

View File

@@ -0,0 +1,121 @@
#!/usr/bin/python3
from os import _exit, getenv, makedirs, remove
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from time import sleep
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/core/bunkernet/jobs")
from logger import setup_logger
from bunkernet import register, ping, get_id
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Check if at least a server has BunkerNet activated
bunkernet_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(f"{first_server}_USE_BUNKERNET", getenv("USE_BUNKERNET", "yes"))
== "yes"
):
bunkernet_activated = True
break
# Singlesite case
elif getenv("USE_BUNKERNET", "yes") == "yes":
bunkernet_activated = True
if not bunkernet_activated:
logger.info("BunkerNet is not activated, skipping registration...")
_exit(0)
# Create directory if it doesn't exist
makedirs("/opt/bunkerweb/cache/bunkernet", exist_ok=True)
# Ask an ID if needed
bunkernet_id = None
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
logger.info("Registering instance on BunkerNet API ...")
ok, status, data = register()
if not ok:
logger.error(
f"Error while sending register request to BunkerNet API : {data}"
)
_exit(1)
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
_exit(0)
elif status != 200:
logger.error(
f"Error {status} from BunkerNet API : {data['data']}",
)
_exit(1)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending register request : {data['data']}"
)
_exit(1)
bunkernet_id = data["data"]
logger.info(
f"Successfully registered on BunkerNet API with instance id {data['data']}"
)
else:
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "r") as f:
bunkernet_id = f.read()
logger.info(f"Already registered on BunkerNet API with instance id {get_id()}")
# Ping
logger.info("Checking connectivity with BunkerNet API ...")
bunkernet_ping = False
for i in range(0, 5):
ok, status, data = ping(bunkernet_id)
retry = False
if not ok:
logger.error(f"Error while sending ping request to BunkerNet API : {data}")
retry = True
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
)
retry = True
elif status == 401:
logger.warning(
"Instance ID is not registered, removing it and retrying a register later...",
)
remove("/opt/bunkerweb/cache/bunkernet/instance.id")
_exit(2)
elif data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending ping request : {data['data']}, removing instance ID",
)
retry = True
if not retry:
bunkernet_ping = True
break
logger.warning("Waiting 1s and trying again ...")
sleep(1)
if bunkernet_ping:
logger.info("Connectivity with BunkerWeb is successful !")
status = 1
if not isfile("/opt/bunkerweb/cache/bunkernet/instance.id"):
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "w") as f:
f.write(bunkernet_id)
else:
logger.error("Connectivity with BunkerWeb failed ...")
status = 2
except:
status = 2
logger.error(f"Exception while running bunkernet-register.py :\n{format_exc()}")
sys_exit(status)

View File

@@ -0,0 +1,62 @@
import requests, traceback
from os import getenv
def request(method, url, _id=None):
data = {"integration": get_integration(), "version": get_version()}
headers = {"User-Agent": "BunkerWeb/" + get_version()}
if _id is not None:
data["id"] = _id
try:
resp = requests.request(
method,
getenv("BUNKERNET_SERVER", "https://api.bunkerweb.io") + url,
json=data,
headers=headers,
timeout=5,
)
status = resp.status_code
if status == 429:
return True, 429, "rate limited"
raw_data = resp.json()
assert "result" in raw_data
assert "data" in raw_data
except Exception as e:
return False, None, traceback.format_exc()
return True, status, raw_data
def register():
return request("POST", "/register")
def ping(_id=None):
return request("GET", "/ping", _id=get_id() if _id is None else _id)
def data():
return request("GET", "/db", _id=get_id())
def get_id():
with open("/opt/bunkerweb/cache/bunkernet/instance.id", "r") as f:
return f.read().strip()
def get_version():
with open("/opt/bunkerweb/VERSION", "r") as f:
return f.read().strip()
def get_integration():
try:
if getenv("KUBERNETES_MODE") == "yes":
return "kubernetes"
if getenv("SWARM_MODE") == "yes":
return "swarm"
with open("/etc/os-release", "r") as f:
if f.read().contains("Alpine"):
return "docker"
return "linux"
except:
return "unknown"

View File

@@ -0,0 +1,41 @@
{
"id": "bunkernet",
"order": 6,
"name": "BunkerNet",
"description": "Share threat data with other BunkerWeb instances via BunkerNet.",
"version": "0.1",
"settings": {
"USE_BUNKERNET": {
"context": "multisite",
"default": "yes",
"help": "Activate BunkerNet feature.",
"id": "use-bunkernet",
"label": "Activate BunkerNet",
"regex": "^(yes|no)$",
"type": "check"
},
"BUNKERNET_SERVER": {
"context": "global",
"default": "https://api.bunkerweb.io",
"help": "Address of the BunkerNet API.",
"id": "bunkernet-server",
"label": "BunkerNet server",
"regex": "^.*$",
"type": "text"
}
},
"jobs": [
{
"name": "bunkernet-register",
"file": "bunkernet-register.py",
"every": "hour",
"reload": true
},
{
"name": "bunkernet-data",
"file": "bunkernet-data.py",
"every": "day",
"reload": true
}
]
}

View File

@@ -0,0 +1,45 @@
{
"id": "clientcache",
"order": 999,
"name": "Client cache",
"description": "Manage caching for clients.",
"version": "0.1",
"settings": {
"USE_CLIENT_CACHE": {
"context": "multisite",
"default": "no",
"help": "Tell client to store locally static files.",
"id": "use-client-cache",
"label": "Use client cache",
"regex": "^(yes|no)$",
"type": "check"
},
"CLIENT_CACHE_EXTENSIONS": {
"context": "global",
"default": "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2",
"help": "List of file extensions that should be cached.",
"id": "client-cache-extensions",
"label": "Extensions that should be cached by the client",
"regex": "^.*$",
"type": "text"
},
"CLIENT_CACHE_ETAG": {
"context": "multisite",
"default": "yes",
"help": "Send the HTTP ETag header for static resources.",
"id": "client-cache-etag",
"label": "ETag",
"regex": "^(yes|no)$",
"type": "check"
},
"CLIENT_CACHE_CONTROL": {
"context": "multisite",
"default": "public, max-age=15552000",
"help": "Value of the Cache-Control HTTP header.",
"id": "client-cache-control",
"label": "Cache-Control header",
"regex": "^.*$",
"type": "text"
}
}
}

bw/core/cors/plugin.json Normal file
View File

@@ -0,0 +1,72 @@
{
"id": "cors",
"order": 999,
"name": "CORS",
"description": "Cross-Origin Resource Sharing.",
"version": "0.1",
"settings": {
"USE_CORS": {
"context": "multisite",
"default": "no",
"help": "Use CORS",
"id": "use-cors",
"label": "Use CORS",
"regex": "^(yes|no)$",
"type": "check"
},
"CORS_ALLOW_ORIGIN": {
"context": "multisite",
"default": "*",
"help": "Value of the Access-Control-Allow-Origin header.",
"id": "cors-allow-origin",
"label": "Access-Control-Allow-Origin value",
"regex": "^.*$",
"type": "text"
},
"CORS_EXPOSE_HEADERS": {
"context": "multisite",
"default": "Content-Length,Content-Range",
"help": "Value of the Access-Control-Expose-Headers header.",
"id": "cors-expose-headers",
"label": "Access-Control-Expose-Headers value",
"regex": "^.*$",
"type": "text"
},
"CORS_MAX_AGE": {
"context": "multisite",
"default": "86400",
"help": "Value of the Access-Control-Max-Age header.",
"id": "cors-max-age",
"label": "Access-Control-Max-Age value",
"regex": "^[0-9]+$",
"type": "text"
},
"CORS_ALLOW_CREDENTIALS": {
"context": "multisite",
"default": "no",
"help": "Send the Access-Control-Allow-Credentials header.",
"id": "cors-allow-credentials",
"label": "Send Access-Control-Allow-Credentials",
"regex": "^(yes|no)$",
"type": "check"
},
"CORS_ALLOW_METHODS": {
"context": "multisite",
"default": "GET, POST, OPTIONS",
"help": "Value of the Access-Control-Allow-Methods header.",
"id": "cors-allow-methods",
"label": "Access-Control-Allow-Methods value",
"regex": "^.*$",
"type": "text"
},
"CORS_ALLOW_HEADERS": {
"context": "multisite",
"default": "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range",
"help": "Value of the Access-Control-Allow-Headers header.",
"id": "cors-allow-headers",
"label": "Access-Control-Allow-Headers value",
"regex": "^.*$",
"type": "text"
}
}
}

View File

@@ -0,0 +1,27 @@
{
"id": "country",
"order": 4,
"name": "Country",
"description": "Deny access based on the country of the client IP.",
"version": "0.1",
"settings": {
"BLACKLIST_COUNTRY": {
"context": "multisite",
"default": "",
"help": "Deny access if the country of the client is in the list (2 letters code).",
"id": "country-blacklist",
"label": "Country blacklist",
"regex": "^.*$",
"type": "text"
},
"WHITELIST_COUNTRY": {
"context": "multisite",
"default": "",
"help": "Deny access if the country of the client is not in the list (2 letters code).",
"id": "country-whitelist",
"label": "Country whitelist",
"regex": "^.*$",
"type": "text"
}
}
}

View File

@@ -0,0 +1,85 @@
#!/usr/bin/python3
from os import getenv, makedirs
from os.path import isfile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from jobs import file_hash
from logger import setup_logger
logger = setup_logger("CUSTOM-CERT", getenv("LOG_LEVEL", "INFO"))
def check_cert(cert_path):
try:
cache_path = (
"/opt/bunkerweb/cache/customcert/" + cert_path.replace("/", "_") + ".hash"
)
current_hash = file_hash(cert_path)
if not isfile(cache_path):
with open(cache_path, "w") as f:
f.write(current_hash)
old_hash = file_hash(cache_path)
if old_hash == current_hash:
return False
with open(cache_path, "w") as f:
f.write(current_hash)
return True
except:
logger.error(
f"Exception while running custom-cert.py (check_cert) :\n{format_exc()}",
)
return False
status = 0
try:
makedirs("/opt/bunkerweb/cache/customcert/", exist_ok=True)
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if (
getenv(first_server + "_USE_CUSTOM_HTTPS", getenv("USE_CUSTOM_HTTPS"))
!= "yes"
):
continue
if first_server == "":
continue
cert_path = getenv(first_server + "_CUSTOM_HTTPS_CERT")
logger.info(
f"Checking if certificate {cert_path} changed ...",
)
need_reload = check_cert(cert_path)
if need_reload:
logger.info(
f"Detected change for certificate {cert_path}",
)
status = 1
else:
logger.info(
"No change for certificate {cert_path}",
)
# Singlesite case
elif getenv("USE_CUSTOM_HTTPS") == "yes" and getenv("SERVER_NAME") != "":
cert_path = getenv("CUSTOM_HTTPS_CERT")
logger.info(f"Checking if certificate {cert_path} changed ...")
need_reload = check_cert(cert_path)
if need_reload:
logger.info(f"Detected change for certificate {cert_path}")
status = 1
else:
logger.info(f"No change for certificate {cert_path}")
except:
status = 2
logger.error(f"Exception while running custom-cert.py :\n{format_exc()}")
sys_exit(status)

View File

@@ -0,0 +1,44 @@
{
"id": "customcert",
"order": 999,
"name": "Custom HTTPS certificate",
"description": "Choose custom certificate for HTTPS.",
"version": "0.1",
"settings": {
"USE_CUSTOM_HTTPS": {
"context": "multisite",
"default": "no",
"help": "Use custom HTTPS certificate.",
"id": "use-custom-https",
"label": "Use custom certificate",
"regex": "^(yes|no)$",
"type": "check"
},
"CUSTOM_HTTPS_CERT": {
"context": "multisite",
"default": "",
"help": "Full path of the certificate or bundle file.",
"id": "custom-https-cert",
"label": "Certificate path",
"regex": "^.*$",
"type": "text"
},
"CUSTOM_HTTPS_KEY": {
"context": "multisite",
"default": "",
"help": "Full path of the key file.",
"id": "custom-https-key",
"label": "Key path",
"regex": "^.*$",
"type": "text"
}
},
"jobs": [
{
"name": "custom-cert",
"file": "custom-cert.py",
"every": "day",
"reload": true
}
]
}

bw/core/db/plugin.json Normal file
View File

@@ -0,0 +1,18 @@
{
"id": "db",
"order": 999,
"name": "DB",
"description": "Integrate easily the Database.",
"version": "0.1",
"settings": {
"DATABASE_URI": {
"context": "global",
"default": "sqlite:\/\/\/\/data/db.sqlite3",
"help": "The database URI, following the sqlalchemy format.",
"id": "database-uri",
"label": "The database URI",
"regex": "^.*$",
"type": "text"
}
}
}

bw/core/dnsbl/plugin.json Normal file
View File

@@ -0,0 +1,27 @@
{
"id": "dnsbl",
"order": 5,
"name": "DNSBL",
"description": "Deny access based on external DNSBL servers.",
"version": "0.1",
"settings": {
"USE_DNSBL": {
"context": "multisite",
"default": "yes",
"help": "Activate DNSBL feature.",
"id": "use-dnsbl",
"label": "Activate DNSBL",
"regex": "^(yes|no)$",
"type": "check"
},
"DNSBL_LIST": {
"context": "global",
"default": "bl.blocklist.de problems.dnsbl.sorbs.net sbl.spamhaus.org xbl.spamhaus.org",
"help": "List of DNSBL servers.",
"id": "dnsbl-list",
"label": "DNSBL list",
"regex": "^.*$",
"type": "text"
}
}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,18 @@
{
"id": "errors",
"order": 999,
"name": "Errors",
"description": "Manage default error pages",
"version": "0.1",
"settings": {
"ERRORS": {
"context": "multisite",
"default": "",
"help": "List of HTTP error code and corresponding error pages (404=/my404.html 403=/errors/403.html ...).",
"id": "errors",
"label": "Errors",
"regex": "^.*$",
"type": "text"
}
}
}

View File

@@ -0,0 +1,179 @@
#!/usr/bin/python3
from ipaddress import ip_address, ip_network
from os import _exit, getenv, makedirs
from re import match
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
from requests import get
from logger import setup_logger
from jobs import cache_file, cache_hash, is_cached_file, file_hash
def check_line(kind, line):
if kind == "IP":
if "/" in line:
try:
ip_network(line)
return True, line
except ValueError:
pass
else:
try:
ip_address(line)
return True, line
except ValueError:
pass
return False, ""
elif kind == "RDNS":
if match(r"^(\.?[A-Za-z0-9\-]+)*\.[A-Za-z]{2,}$", line):
return True, line.lower()
return False, ""
elif kind == "ASN":
real_line = line.replace("AS", "")
if match(r"^\d+$", real_line):
return True, real_line
elif kind == "USER_AGENT":
return True, line.replace("\\ ", " ").replace("\\.", "%.").replace(
"\\\\", "\\"
).replace("-", "%-")
elif kind == "URI":
if match(r"^/", line):
return True, line
return False, ""
logger = setup_logger("GREYLIST", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Check if at least a server has Greylist activated
greylist_activated = False
# Multisite case
if getenv("MULTISITE") == "yes":
for first_server in getenv("SERVER_NAME").split(" "):
if getenv(first_server + "_USE_GREYLIST", getenv("USE_GREYLIST")) == "yes":
greylist_activated = True
break
# Singlesite case
elif getenv("USE_GREYLIST") == "yes":
greylist_activated = True
if not greylist_activated:
logger.info("Greylist is not activated, skipping downloads...")
_exit(0)
# Create directories if they don't exist
makedirs("/opt/bunkerweb/cache/greylist", exist_ok=True)
makedirs("/opt/bunkerweb/tmp/greylist", exist_ok=True)
# Our urls data
urls = {"IP": [], "RDNS": [], "ASN": [], "USER_AGENT": [], "URI": []}
# Don't go further if the cache is fresh
kinds_fresh = {
"IP": True,
"RDNS": True,
"ASN": True,
"USER_AGENT": True,
"URI": True,
"IGNORE_IP": True,
"IGNORE_RDNS": True,
"IGNORE_ASN": True,
"IGNORE_USER_AGENT": True,
"IGNORE_URI": True,
}
all_fresh = True
for kind in kinds_fresh:
if not is_cached_file(f"/opt/bunkerweb/cache/greylist/{kind}.list", "hour"):
kinds_fresh[kind] = False
all_fresh = False
logger.info(
f"Greylist for {kind} is not cached, processing downloads..",
)
else:
logger.info(
f"Greylist for {kind} is already in cache, skipping downloads...",
)
if all_fresh:
_exit(0)
# Get URLs
urls = {
"IP": [],
"RDNS": [],
"ASN": [],
"USER_AGENT": [],
"URI": [],
"IGNORE_IP": [],
"IGNORE_RDNS": [],
"IGNORE_ASN": [],
"IGNORE_USER_AGENT": [],
"IGNORE_URI": [],
}
for kind in urls:
for url in getenv(f"GREYLIST_{kind}_URLS", "").split(" "):
if url != "" and url not in urls[kind]:
urls[kind].append(url)
# Loop on kinds
for kind, urls_list in urls.items():
if kinds_fresh[kind]:
continue
# Write combined data of the kind to a single temp file
for url in urls_list:
try:
logger.info(f"Downloading greylist data from {url} ...")
resp = get(url, stream=True)
if resp.status_code != 200:
continue
i = 0
with open(f"/opt/bunkerweb/tmp/greylist/{kind}.list", "w") as f:
for line in resp.iter_lines(decode_unicode=True):
line = line.strip()
if kind != "USER_AGENT":
line = line.strip().split(" ")[0]
if line == "" or line.startswith("#") or line.startswith(";"):
continue
ok, data = check_line(kind, line)
if ok:
f.write(data + "\n")
i += 1
logger.info(f"Downloaded {i} bad {kind}")
# Check if file has changed
new_hash = file_hash(f"/opt/bunkerweb/tmp/greylist/{kind}.list")
old_hash = cache_hash(f"/opt/bunkerweb/cache/greylist/{kind}.list")
if new_hash == old_hash:
logger.info(
f"New file {kind}.list is identical to cache file, reload is not needed",
)
else:
logger.info(
f"New file {kind}.list is different than cache file, reload is needed",
)
# Put file in cache
cached, err = cache_file(
f"/opt/bunkerweb/tmp/greylist/{kind}.list",
f"/opt/bunkerweb/cache/greylist/{kind}.list",
new_hash,
)
if not cached:
logger.error(f"Error while caching greylist : {err}")
status = 2
if status != 2:
status = 1
except:
status = 2
logger.error(
f"Exception while getting greylist from {url} :\n{format_exc()}"
)
except:
status = 2
logger.error(f"Exception while running greylist-download.py :\n{format_exc()}")
sys_exit(status)

View File

@@ -0,0 +1,125 @@
{
"id": "greylist",
"order": 3,
"name": "Greylist",
"description": "Allow access while keeping security features based on internal and external IP/network/rDNS/ASN greylists.",
"version": "0.1",
"settings": {
"USE_GREYLIST": {
"context": "multisite",
"default": "no",
"help": "Activate greylist feature.",
"id": "use-greylist",
"label": "Activate greylisting",
"regex": "^(yes|no)$",
"type": "check"
},
"GREYLIST_IP_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing good IP/network to put into the greylist.",
"id": "greylist-ip-urls",
"label": "Greylist IP/network URLs",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_IP": {
"context": "multisite",
"default": "",
"help": "List of IP/network, separated with spaces, to put into the greylist.",
"id": "greylist-ip",
"label": "Greylist IP/network",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_RDNS": {
"context": "multisite",
"default": "",
"help": "List of reverse DNS suffixes, separated with spaces, to put into the greylist.",
"id": "greylist-rdns",
"label": "Greylist reverse DNS",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_RDNS_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing reverse DNS suffixes to put into the greylist.",
"id": "greylist-rdns-urls",
"label": "Greylist reverse DNS URLs",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_RDNS_GLOBAL": {
"context": "multisite",
"default": "yes",
"help": "Only perform RDNS greylist checks on global IP addresses.",
"id": "greylist-rdns-global",
"label": "Greylist reverse DNS global IPs",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_ASN": {
"context": "multisite",
"default": "",
"help": "List of ASN numbers, separated with spaces, to put into the greylist.",
"id": "greylist-asn",
"label": "Greylist ASN",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_ASN_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing ASN to put into the greylist.",
"id": "greylist-rdns-urls",
"label": "Greylist ASN URLs",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_USER_AGENT": {
"context": "multisite",
"default": "",
"help": "List of User-Agent, separated with spaces, to put into the greylist.",
"id": "greylist-user-agent",
"label": "Greylist User-Agent",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_USER_AGENT_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing good User-Agent to put into the greylist.",
"id": "greylist-user-agent-urls",
"label": "Greylist User-Agent URLs",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_URI": {
"context": "multisite",
"default": "",
"help": "List of URI, separated with spaces, to put into the greylist.",
"id": "greylist-uri",
"label": "Greylist URI",
"regex": "^.*$",
"type": "text"
},
"GREYLIST_URI_URLS": {
"context": "global",
"default": "",
"help": "List of URLs, separated with spaces, containing bad URI to put into the greylist.",
"id": "greylist-uri-urls",
"label": "Greylist URI URLs",
"regex": "^.*$",
"type": "text"
}
},
"jobs": [
{
"name": "greylist-download",
"file": "greylist-download.py",
"every": "hour",
"reload": true
}
]
}

bw/core/gzip/plugin.json Normal file
View File

@@ -0,0 +1,46 @@
{
"id": "gzip",
"order": 999,
"name": "Gzip",
"description": "Compress HTTP requests with the gzip algorithm.",
"version": "0.1",
"settings": {
"USE_GZIP": {
"context": "multisite",
"default": "no",
"help": "Use gzip",
"id": "use-gzip",
"label": "Use gzip",
"regex": "^(yes|no)$",
"type": "check"
},
"GZIP_TYPES": {
"context": "multisite",
"default": "application/atom+xml application/javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-opentype application/x-font-truetype application/x-font-ttf application/x-javascript application/xhtml+xml application/xml font/eot font/opentype font/otf font/truetype image/svg+xml image/vnd.microsoft.icon image/x-icon image/x-win-bitmap text/css text/javascript text/plain text/xml",
"help": "List of MIME types that will be compressed with gzip.",
"id": "gzip-types",
"label": "MIME types",
"regex": "^.*$",
"type": "text"
},
"GZIP_MIN_LENGTH": {
"context": "multisite",
"default": "1000",
"help": "Minimum length for gzip compression.",
"id": "gzip-min-length",
"label": "Minimum length",
"regex": "^.*$",
"type": "text"
},
"GZIP_COMP_LEVEL": {
"context": "multisite",
"default": "5",
"help": "The compression level of the gzip algorithm.",
"id": "gzip-comp-level",
"label": "Compression level",
"regex": "^[1-9]$",
"type": "select",
"select": ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
}
}

bw/core/headers/plugin.json Normal file
View File

@@ -0,0 +1,118 @@
{
"id": "headers",
"order": 999,
"name": "Headers",
"description": "Manage HTTP headers sent to clients.",
"version": "0.1",
"settings": {
"CUSTOM_HEADER": {
"context": "multisite",
"default": "",
"help": "Custom header to add (HeaderName: HeaderValue).",
"id": "custom-header",
"label": "Custom header (HeaderName: HeaderValue)",
"regex": "^.*$",
"type": "text",
"multiple": "custom-headers"
},
"REMOVE_HEADERS": {
"context": "multisite",
"default": "Server X-Powered-By X-AspNet-Version X-AspNetMvc-Version",
"help": "Headers to remove (Header1 Header2 Header3 ...)",
"id": "remove-headers",
"label": "Remove headers",
"regex": "^.*$",
"type": "text"
},
"STRICT_TRANSPORT_SECURITY": {
"context": "multisite",
"default": "max-age=31536000",
"help": "Value for the Strict-Transport-Security header.",
"id": "strict-transport-security",
"label": "Strict-Transport-Security",
"regex": "^.*$",
"type": "text"
},
"COOKIE_FLAGS": {
"context": "multisite",
"default": "* HttpOnly SameSite=Lax",
"help": "Cookie flags automatically added to all cookies (value accepted for nginx_cookie_flag_module).",
"id": "cookie-flags",
"label": "Cookie flags",
"regex": "^.*$",
"type": "text"
},
"COOKIE_AUTO_SECURE_FLAG": {
"context": "multisite",
"default": "yes",
"help": "Automatically add the Secure flag to all cookies.",
"id": "cookie-auto-secure-flag",
"label": "Cookie auto Secure flag",
"regex": "^(yes|no)$",
"type": "check"
},
"CONTENT_SECURITY_POLICY": {
"context": "multisite",
"default": "object-src 'none'; form-action 'self'; frame-ancestors 'self';",
"help": "Value for the Content-Security-Policy header.",
"id": "content-security-policy",
"label": "Content-Security-Policy",
"regex": "^.*$",
"type": "text"
},
"REFERRER_POLICY": {
"context": "multisite",
"default": "strict-origin-when-cross-origin",
"help": "Value for the Referrer-Policy header.",
"id": "referrer-policy",
"label": "Referrer-Policy",
"regex": "^.*$",
"type": "text"
},
"PERMISSIONS_POLICY": {
"context": "multisite",
"default": "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), cross-origin-isolated=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), hid=(), idle-detection=(), magnetometer=(), microphone=(), midi=(), navigation-override=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), serial=(), usb=(), web-share=(), xr-spatial-tracking=()",
"help": "Value for the Permissions-Policy header.",
"id": "permissions-policy",
"label": "Permissions-Policy",
"regex": "^.*$",
"type": "text"
},
"FEATURE_POLICY": {
"context": "multisite",
"default": "accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; battery 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; execution-while-not-rendered 'none'; execution-while-out-of-viewport 'none'; fullscreen 'none'; 'none'; geolocation 'none'; gyroscope 'none'; layout-animation 'none'; legacy-image-formats 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; navigation-override 'none'; payment 'none'; picture-in-picture 'none'; publickey-credentials-get 'none'; speaker-selection 'none'; sync-xhr 'none'; unoptimized-images 'none'; unsized-media 'none'; usb 'none'; screen-wake-lock 'none'; web-share 'none'; xr-spatial-tracking 'none';",
"help": "Value for the Feature-Policy header.",
"id": "feature-policy",
"label": "Feature-Policy",
"regex": "^.*$",
"type": "text"
},
"X_FRAME_OPTIONS": {
"context": "multisite",
"default": "SAMEORIGIN",
"help": "Value for the X-Frame-Options header.",
"id": "x-frame-options",
"label": "X-Frame-Options",
"regex": "^.*$",
"type": "text"
},
"X_CONTENT_TYPE_OPTIONS": {
"context": "multisite",
"default": "nosniff",
"help": "Value for the X-Content-Type-Options header.",
"id": "x-content-type-options",
"label": "X-Content-Type-Options",
"regex": "^.*$",
"type": "text"
},
"X_XSS_PROTECTION": {
"context": "multisite",
"default": "1; mode=block",
"help": "Value for the X-XSS-Protection header.",
"id": "x-xss-protection",
"label": "X-XSS-Protection",
"regex": "^.*$",
"type": "text"
}
}
}
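
These settings, like the rest of BunkerWeb's configuration, are read from environment variables; a minimal sketch (the values and the www.example.com prefix are illustrative, not the defaults above):

REMOVE_HEADERS=Server X-Powered-By
COOKIE_FLAGS=* HttpOnly SameSite=Strict
www.example.com_X_FRAME_OPTIONS=DENY

Because these settings have a multisite context, prefixing one with a server name should override the global value for that site only when MULTISITE=yes.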

View File

@ -0,0 +1,18 @@
{
"id": "inject",
"order": 999,
"name": "HTML injection",
"description": "Inject custom HTML code before the </body> tag.",
"version": "0.1",
"settings": {
"INJECT_BODY": {
"context": "multisite",
"default": "",
"help": "The HTML code to inject.",
"id": "inject-body",
"label": "HTML code",
"regex": "^.*$",
"type": "text"
}
}
}
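
As a usage sketch (the script URL is a placeholder), the snippet is injected as-is before the closing </body> tag of served pages:

INJECT_BODY=<script src="https://analytics.example.com/tracker.js"></script>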

View File

@ -0,0 +1,100 @@
#!/usr/bin/python3
from io import BytesIO
from os import getenv, makedirs, chmod, stat, _exit
from os.path import isfile, dirname
from stat import S_IEXEC
from sys import exit as sys_exit, path as sys_path
from uuid import uuid4
from glob import glob
from json import loads
from shutil import copytree, rmtree
from traceback import format_exc
from zipfile import ZipFile

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")

from requests import get

from logger import setup_logger

logger = setup_logger("Jobs", getenv("LOG_LEVEL", "INFO"))
status = 0


def install_plugin(plugin_dir):
    # Load plugin.json
    metadata = {}
    with open(f"{plugin_dir}plugin.json", "r") as f:
        metadata = loads(f.read())
    # Don't go further if plugin is already installed
    if isfile(f"/data/plugins/{metadata['id']}/plugin.json"):
        logger.info(
            f"Skipping installation of plugin {metadata['id']} (already installed)",
        )
        return
    # Copy the plugin
    copytree(plugin_dir, f"/data/plugins/{metadata['id']}")
    # Add u+x permissions to the jobs files of the installed copy
    # (chmod the destination so the permission change takes effect)
    for job_file in glob(f"/data/plugins/{metadata['id']}/jobs/*"):
        st = stat(job_file)
        chmod(job_file, st.st_mode | S_IEXEC)


try:
    # Check if we have plugins to download
    plugin_urls = getenv("EXTERNAL_PLUGIN_URLS", "")
    if plugin_urls == "":
        logger.info("No external plugins to download")
        _exit(0)

    # Loop on URLs
    for plugin_url in plugin_urls.split(" "):
        # Download ZIP file
        try:
            req = get(plugin_url)
        except:
            logger.error(
                f"Exception while downloading plugin(s) from {plugin_url} :",
            )
            print(format_exc())
            status = 2
            continue

        # Extract it to tmp folder
        temp_dir = "/opt/bunkerweb/tmp/plugins-" + str(uuid4()) + "/"
        try:
            makedirs(temp_dir, exist_ok=True)
            with ZipFile(BytesIO(req.content)) as zf:
                zf.extractall(path=temp_dir)
        except:
            logger.error(
                f"Exception while decompressing plugin(s) from {plugin_url} :",
            )
            print(format_exc())
            status = 2
            continue

        # Install plugins
        try:
            for plugin_dir in glob(temp_dir + "**/plugin.json", recursive=True):
                install_plugin(dirname(plugin_dir) + "/")
        except:
            logger.error(
                f"Exception while installing plugin(s) from {plugin_url} :",
            )
            print(format_exc())
            status = 2
            continue
except:
    status = 2
    logger.error(f"Exception while running download-plugins.py :\n{format_exc()}")

for plugin_tmp in glob("/opt/bunkerweb/tmp/plugins-*/"):
    rmtree(plugin_tmp)

sys_exit(status)
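
Usage sketch: the job reads space-separated URLs, each pointing to a zip archive that contains a plugin.json somewhere in its tree (the URLs are placeholders):

EXTERNAL_PLUGIN_URLS=https://example.com/my-plugin.zip https://example.com/other-plugin.zip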

71
bw/core/jobs/jobs/mmdb-asn.py Executable file
View File

@ -0,0 +1,71 @@
#!/usr/bin/python3
from datetime import date
from gzip import decompress
from os import _exit, getenv
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")

from maxminddb import open_database
from requests import get

from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file

logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Don't go further if the cache is fresh
    if is_cached_file("/opt/bunkerweb/cache/asn.mmdb", "month"):
        logger.info("asn.mmdb is already in cache, skipping download...")
        _exit(0)

    # Compute the mmdb URL
    today = date.today()
    mmdb_url = "https://download.db-ip.com/free/dbip-asn-lite-{}-{}.mmdb.gz".format(
        today.strftime("%Y"), today.strftime("%m")
    )

    # Download the mmdb file
    logger.info(f"Downloading mmdb file from url {mmdb_url} ...")
    resp = get(mmdb_url)

    # Save it to temp
    logger.info("Saving mmdb file to tmp ...")
    with open("/opt/bunkerweb/tmp/asn.mmdb", "wb") as f:
        f.write(decompress(resp.content))

    # Try to load it
    logger.info("Checking if mmdb file is valid ...")
    with open_database("/opt/bunkerweb/tmp/asn.mmdb") as reader:
        pass

    # Check if file has changed
    new_hash = file_hash("/opt/bunkerweb/tmp/asn.mmdb")
    old_hash = cache_hash("/opt/bunkerweb/cache/asn.mmdb")
    if new_hash == old_hash:
        logger.info("New file is identical to cache file, reload is not needed")
        _exit(0)

    # Move it to cache folder
    logger.info("Moving mmdb file to cache ...")
    cached, err = cache_file(
        "/opt/bunkerweb/tmp/asn.mmdb", "/opt/bunkerweb/cache/asn.mmdb", new_hash
    )
    if not cached:
        logger.error(f"Error while caching mmdb file : {err}")
        _exit(2)

    # Success
    logger.info(f"Downloaded new mmdb from {mmdb_url}")
    status = 1
except:
    status = 2
    logger.error(f"Exception while running mmdb-asn.py :\n{format_exc()}")

sys_exit(status)
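
Once cached, the database can be queried with the same maxminddb package the job uses for validation; a minimal sketch (the lookup address is just an example):

#!/usr/bin/python3
from maxminddb import open_database

# Look up the ASN record of an IP in the cached database
with open_database("/opt/bunkerweb/cache/asn.mmdb") as reader:
    print(reader.get("8.8.8.8"))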

View File

@ -0,0 +1,73 @@
#!/usr/bin/python3
from datetime import date
from gzip import decompress
from os import _exit, getenv
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")

from requests import get
from maxminddb import open_database

from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file

logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Don't go further if the cache is fresh
    if is_cached_file("/opt/bunkerweb/cache/country.mmdb", "month"):
        logger.info("country.mmdb is already in cache, skipping download...")
        _exit(0)

    # Compute the mmdb URL
    today = date.today()
    mmdb_url = "https://download.db-ip.com/free/dbip-country-lite-{}-{}.mmdb.gz".format(
        today.strftime("%Y"), today.strftime("%m")
    )

    # Download the mmdb file
    logger.info(f"Downloading mmdb file from url {mmdb_url} ...")
    resp = get(mmdb_url)

    # Save it to temp
    logger.info("Saving mmdb file to tmp ...")
    with open("/opt/bunkerweb/tmp/country.mmdb", "wb") as f:
        f.write(decompress(resp.content))

    # Try to load it
    logger.info("Checking if mmdb file is valid ...")
    with open_database("/opt/bunkerweb/tmp/country.mmdb") as reader:
        pass

    # Check if file has changed
    new_hash = file_hash("/opt/bunkerweb/tmp/country.mmdb")
    old_hash = cache_hash("/opt/bunkerweb/cache/country.mmdb")
    if new_hash == old_hash:
        logger.info("New file is identical to cache file, reload is not needed")
        _exit(0)

    # Move it to cache folder
    logger.info("Moving mmdb file to cache ...")
    cached, err = cache_file(
        "/opt/bunkerweb/tmp/country.mmdb",
        "/opt/bunkerweb/cache/country.mmdb",
        new_hash,
    )
    if not cached:
        logger.error(f"Error while caching mmdb file : {err}")
        _exit(2)

    # Success
    logger.info(f"Downloaded new mmdb from {mmdb_url}")
    status = 1
except:
    status = 2
    logger.error(f"Exception while running mmdb-country.py :\n{format_exc()}")

sys_exit(status)

28
bw/core/jobs/plugin.json Normal file
View File

@ -0,0 +1,28 @@
{
"id": "jobs",
"order": 999,
"name": "Jobs",
"description": "Fake core plugin for internal jobs.",
"version": "0.1",
"settings": {},
"jobs": [
{
"name": "mmdb-country",
"file": "mmdb-country.py",
"every": "week",
"reload": true
},
{
"name": "mmdb-asn",
"file": "mmdb-asn.py",
"every": "week",
"reload": true
},
{
"name": "download-plugins",
"file": "download-plugins.py",
"every": "once",
"reload": false
}
]
}
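
For reference, a custom plugin would declare its own jobs with the same shape; a hypothetical entry (name and file are made up, "every" uses one of the schedule keywords seen above, and "reload" tells the scheduler whether to reload BunkerWeb after a successful run):

{
  "name": "my-custom-job",
  "file": "my-custom-job.py",
  "every": "week",
  "reload": false
}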

View File

@ -0,0 +1,63 @@
#!/usr/bin/python3
import sys, os, traceback

sys.path.append("/opt/bunkerweb/deps/python")
sys.path.append("/opt/bunkerweb/utils")
sys.path.append("/opt/bunkerweb/api")

from logger import setup_logger
from API import API

logger = setup_logger("Lets-encrypt", os.getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Get env vars
    is_kubernetes_mode = os.getenv("KUBERNETES_MODE") == "yes"
    is_swarm_mode = os.getenv("SWARM_MODE") == "yes"
    is_autoconf_mode = os.getenv("AUTOCONF_MODE") == "yes"
    token = os.getenv("CERTBOT_TOKEN")
    validation = os.getenv("CERTBOT_VALIDATION")

    # Cluster case
    if is_kubernetes_mode or is_swarm_mode or is_autoconf_mode:
        for variable, value in os.environ.items():
            if not variable.startswith("CLUSTER_INSTANCE_"):
                continue
            endpoint = value.split(" ")[0]
            host = value.split(" ")[1]
            api = API(endpoint, host=host)
            # Keep the HTTP code in its own variable so it doesn't
            # overwrite the script's exit status
            sent, err, api_status, resp = api.request(
                "POST",
                "/lets-encrypt/challenge",
                data={"token": token, "validation": validation},
            )
            if not sent:
                status = 1
                logger.error(
                    f"Can't send API request to {api.get_endpoint()}/lets-encrypt/challenge : {err}"
                )
            elif api_status != 200:
                status = 1
                logger.error(
                    f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/challenge : status = {resp['status']}, msg = {resp['msg']}",
                )
            else:
                logger.info(
                    f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/challenge",
                )
    # Docker or Linux case
    else:
        root_dir = "/opt/bunkerweb/tmp/lets-encrypt/.well-known/acme-challenge/"
        os.makedirs(root_dir, exist_ok=True)
        with open(root_dir + token, "w") as f:
            f.write(validation)
except:
    status = 1
    logger.error("Exception while running certbot-auth.py :")
    print(traceback.format_exc())

sys.exit(status)
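
For the cluster case, each CLUSTER_INSTANCE_* variable is parsed as "<endpoint> <host>", i.e. the instance's API endpoint and API server name separated by a single space; a sketch with placeholder values:

CLUSTER_INSTANCE_0=http://10.20.1.2:5000 bwapi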

View File

@ -0,0 +1,61 @@
#!/usr/bin/python3
import sys, os, traceback

sys.path.append("/opt/bunkerweb/deps/python")
sys.path.append("/opt/bunkerweb/utils")
sys.path.append("/opt/bunkerweb/api")

from logger import setup_logger
from API import API

logger = setup_logger("Lets-encrypt", os.getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Get env vars
    is_kubernetes_mode = os.getenv("KUBERNETES_MODE") == "yes"
    is_swarm_mode = os.getenv("SWARM_MODE") == "yes"
    is_autoconf_mode = os.getenv("AUTOCONF_MODE") == "yes"
    token = os.getenv("CERTBOT_TOKEN")

    # Cluster case
    if is_kubernetes_mode or is_swarm_mode or is_autoconf_mode:
        for variable, value in os.environ.items():
            if not variable.startswith("CLUSTER_INSTANCE_"):
                continue
            endpoint = value.split(" ")[0]
            host = value.split(" ")[1]
            api = API(endpoint, host=host)
            # Keep the HTTP code in its own variable so it doesn't
            # overwrite the script's exit status
            sent, err, api_status, resp = api.request(
                "DELETE", "/lets-encrypt/challenge", data={"token": token}
            )
            if not sent:
                status = 1
                logger.error(
                    f"Can't send API request to {api.get_endpoint()}/lets-encrypt/challenge : {err}"
                )
            elif api_status != 200:
                status = 1
                logger.error(
                    f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/challenge : status = {resp['status']}, msg = {resp['msg']}",
                )
            else:
                logger.info(
                    f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/challenge",
                )
    # Docker or Linux case
    else:
        challenge_path = (
            f"/opt/bunkerweb/tmp/lets-encrypt/.well-known/acme-challenge/{token}"
        )
        if os.path.isfile(challenge_path):
            os.remove(challenge_path)
except:
    status = 1
    logger.error("Exception while running certbot-cleanup.py :")
    print(traceback.format_exc())

sys.exit(status)

View File

@ -0,0 +1,118 @@
#!/usr/bin/python3
from io import BytesIO
from os import environ, getenv
from os.path import exists
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from tarfile import open as tar_open
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")

from logger import setup_logger
from API import API

logger = setup_logger("Lets-encrypt", getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Detect the integration
    bw_integration = None
    if getenv("KUBERNETES_MODE") == "yes":
        bw_integration = "Kubernetes"
    elif getenv("SWARM_MODE") == "yes":
        bw_integration = "Swarm"
    elif getenv("AUTOCONF_MODE") == "yes":
        bw_integration = "Autoconf"
    elif exists("/opt/bunkerweb/INTEGRATION"):
        with open("/opt/bunkerweb/INTEGRATION", "r") as f:
            bw_integration = f.read().strip()
    token = getenv("CERTBOT_TOKEN")

    # Cluster case
    if bw_integration in ("Swarm", "Kubernetes", "Autoconf"):
        # Create tarball of /data/cache/letsencrypt
        tgz = BytesIO()
        with tar_open(mode="w:gz", fileobj=tgz) as tf:
            tf.add("/data/cache/letsencrypt", arcname=".")
        tgz.seek(0, 0)
        files = {"archive.tar.gz": tgz}
        for variable, value in environ.items():
            if not variable.startswith("CLUSTER_INSTANCE_"):
                continue
            endpoint = value.split(" ")[0]
            host = value.split(" ")[1]
            api = API(endpoint, host=host)
            # Keep the HTTP code in its own variable so it doesn't
            # overwrite the script's exit status
            sent, err, api_status, resp = api.request(
                "POST", "/lets-encrypt/certificates", files=files
            )
            if not sent:
                status = 1
                logger.error(
                    f"Can't send API request to {api.get_endpoint()}/lets-encrypt/certificates : {err}"
                )
            elif api_status != 200:
                status = 1
                logger.error(
                    f"Error while sending API request to {api.get_endpoint()}/lets-encrypt/certificates : status = {resp['status']}, msg = {resp['msg']}"
                )
            else:
                logger.info(
                    f"Successfully sent API request to {api.get_endpoint()}/lets-encrypt/certificates",
                )
                sent, err, api_status, resp = api.request("POST", "/reload")
                if not sent:
                    status = 1
                    logger.error(
                        f"Can't send API request to {api.get_endpoint()}/reload : {err}"
                    )
                elif api_status != 200:
                    status = 1
                    logger.error(
                        f"Error while sending API request to {api.get_endpoint()}/reload : status = {resp['status']}, msg = {resp['msg']}"
                    )
                else:
                    logger.info(
                        f"Successfully sent API request to {api.get_endpoint()}/reload"
                    )
    # Docker case
    elif bw_integration == "Docker":
        api = API(f"{getenv('BW_API_URL')}:5000")
        sent, err, api_status, resp = api.request("POST", "/reload")
        if not sent:
            status = 1
            logger.error(
                f"Can't send API request to {api.get_endpoint()}/reload : {err}"
            )
        elif api_status != 200:
            status = 1
            logger.error(
                f"Error while sending API request to {api.get_endpoint()}/reload : status = {resp['status']}, msg = {resp['msg']}"
            )
        else:
            logger.info(
                f"Successfully sent API request to {api.get_endpoint()}/reload"
            )
    # Linux case
    elif bw_integration == "Linux":
        cmd = "/usr/sbin/nginx -s reload"
        proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
        if proc.returncode != 0:
            status = 1
            logger.error("Error while reloading nginx")
        else:
            logger.info("Successfully reloaded nginx")
except:
    status = 1
    logger.error(f"Exception while running certbot-deploy.py :\n{format_exc()}")

sys_exit(status)

View File

@ -0,0 +1,96 @@
#!/usr/bin/python3
from os import environ, getenv
from os.path import exists
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc

sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")

from logger import setup_logger


def certbot_new(domains, email):
    cmd = f"/opt/bunkerweb/deps/python/bin/certbot certonly --manual --preferred-challenges=http --manual-auth-hook /opt/bunkerweb/core/letsencrypt/jobs/certbot-auth.py --manual-cleanup-hook /opt/bunkerweb/core/letsencrypt/jobs/certbot-cleanup.py -n -d {domains} --email {email} --agree-tos"
    if getenv("USE_LETS_ENCRYPT_STAGING") == "yes":
        cmd += " --staging"
    environ["PYTHONPATH"] = "/opt/bunkerweb/deps/python"
    proc = run(
        cmd.split(" "),
        stdin=DEVNULL,
        stderr=STDOUT,
        env=environ,
    )
    return proc.returncode


logger = setup_logger("LETS-ENCRYPT", getenv("LOG_LEVEL", "INFO"))
status = 0

try:
    # Multisite case
    if getenv("MULTISITE") == "yes":
        for first_server in getenv("SERVER_NAME").split(" "):
            if first_server == "":
                continue
            if (
                getenv(f"{first_server}_AUTO_LETS_ENCRYPT", getenv("AUTO_LETS_ENCRYPT"))
                != "yes"
            ):
                continue
            real_server_name = getenv(f"{first_server}_SERVER_NAME", first_server)
            domains = real_server_name.replace(" ", ",")
            if exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
                logger.info(
                    f"Certificates already exist for domain(s) {domains}",
                )
                continue
            real_email = getenv(
                f"{first_server}_EMAIL_LETS_ENCRYPT",
                getenv("EMAIL_LETS_ENCRYPT", f"contact@{first_server}"),
            )
            if real_email == "":
                real_email = f"contact@{first_server}"
            logger.info(
                f"Asking certificates for domain(s) : {domains} (email = {real_email}) ...",
            )
            if certbot_new(domains, real_email) != 0:
                status = 1
                logger.error(
                    f"Certificate generation failed for domain(s) {domains} ...",
                )
            else:
                logger.info(
                    f"Certificate generation succeeded for domain(s) : {domains}"
                )
    # Singlesite case
    elif getenv("AUTO_LETS_ENCRYPT") == "yes" and getenv("SERVER_NAME") != "":
        first_server = getenv("SERVER_NAME").split(" ")[0]
        domains = getenv("SERVER_NAME").replace(" ", ",")
        if exists(f"/etc/letsencrypt/live/{first_server}/cert.pem"):
            logger.info(f"Certificates already exist for domain(s) {domains}")
        else:
            real_email = getenv("EMAIL_LETS_ENCRYPT", f"contact@{first_server}")
            if real_email == "":
                real_email = f"contact@{first_server}"
            logger.info(
                f"Asking certificates for domain(s) : {domains} (email = {real_email}) ...",
            )
            if certbot_new(domains, real_email) != 0:
                status = 2
                logger.error(f"Certificate generation failed for domain(s) : {domains}")
            else:
                logger.info(
                    f"Certificate generation succeeded for domain(s) : {domains}"
                )
except:
    status = 1
    logger.error(f"Exception while running certbot-new.py :\n{format_exc()}")

sys_exit(status)
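
A configuration sketch tying these variables together (domain and email are placeholders; the staging flag is handy while testing, since it avoids Let's Encrypt rate limits but issues untrusted certificates):

SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
EMAIL_LETS_ENCRYPT=admin@example.com
USE_LETS_ENCRYPT_STAGING=yes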

Some files were not shown because too many files have changed in this diff