Add tests for core plugins
This commit is contained in:
parent
29f020f15e
commit
e6bb9fb55f
|
@ -10,7 +10,12 @@ pip install pip --upgrade > /dev/null && pip install pip-compile-multi pip-upgra
|
|||
|
||||
echo "Updating requirements.in files"
|
||||
|
||||
files=("../../docs/requirements.txt" "../common/db/requirements.in" "../common/gen/requirements.in" "../scheduler/requirements.in" "../ui/requirements.in" "../../tests/requirements.txt" "../../tests/ui/requirements.txt")
|
||||
files=("../../docs/requirements.txt" "../common/db/requirements.in" "../common/gen/requirements.in" "../scheduler/requirements.in" "../ui/requirements.in")
|
||||
|
||||
for file in $(find ../../tests -iname "requirements.txt")
|
||||
do
|
||||
files+=("$file")
|
||||
done
|
||||
|
||||
for file in "${files[@]}"
|
||||
do
|
||||
|
@ -31,6 +36,8 @@ do
|
|||
echo "No need to generate hashes for $file"
|
||||
fi
|
||||
|
||||
echo " "
|
||||
|
||||
cd -
|
||||
done
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
env
|
|
@ -0,0 +1,5 @@
|
|||
FROM docker
|
||||
|
||||
COPY . .
|
||||
|
||||
ENTRYPOINT [ "./tests.sh" ]
|
|
@ -0,0 +1,25 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
# Install firefox and geckodriver
|
||||
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
|
||||
apk add --no-cache firefox
|
||||
|
||||
# Installing geckodriver for firefox...
|
||||
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
|
||||
wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
|
||||
chmod +x /usr/local/bin/geckodriver && \
|
||||
rm geckodriver.tar.gz
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,18 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_ANTIBOT: "no"
|
||||
ANTIBOT_URI: "/challenge"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,68 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? ANTIBOT settings
|
||||
USE_ANTIBOT: "no"
|
||||
ANTIBOT_URI: "/challenge"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,95 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.common.exceptions import NoSuchElementException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
test_type = getenv("USE_ANTIBOT", "no")
|
||||
antibot_uri = getenv("ANTIBOT_URI", "/challenge")
|
||||
|
||||
if test_type != "javascript":
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
|
||||
print("ℹ️ Navigating to http://www.example.com ...", flush=True)
|
||||
|
||||
driver.get("http://www.example.com")
|
||||
|
||||
if driver.current_url.endswith(antibot_uri) and test_type == "no":
|
||||
print("❌ Antibot is enabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
elif test_type == "captcha":
|
||||
if not driver.current_url.endswith(antibot_uri):
|
||||
print(
|
||||
"❌ Antibot is disabled or the endpoint is wrong ...", flush=True
|
||||
)
|
||||
exit(1)
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//input[@name='captcha']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The captcha input is missing ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print(
|
||||
f"✅ The captcha input is present{' and the endpoint is correct' if antibot_uri != '/challenge' else ''} ...",
|
||||
flush=True,
|
||||
)
|
||||
else:
|
||||
print("✅ Antibot is disabled, as expected ...", flush=True)
|
||||
else:
|
||||
status_code = get(
|
||||
"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
allow_redirects=False,
|
||||
).status_code
|
||||
if status_code >= 500:
|
||||
print("ℹ️ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif status_code != 302:
|
||||
print(
|
||||
"❌ The server should have redirected to the antibot page ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Status code is 302, as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1,2 @@
|
|||
requests==2.30.0
|
||||
selenium==4.9.1
|
|
@ -0,0 +1,110 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🤖 Building antibot stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@ANTIBOT_URI: "/custom"@ANTIBOT_URI: "/challenge"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_ANTIBOT: ".*"$@USE_ANTIBOT: "no"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🤖 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🤖 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "javascript" "captcha" "endpoint"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🤖 Running tests without antibot ..."
|
||||
elif [ "$test" = "endpoint" ] ; then
|
||||
echo "🤖 Running tests where antibot is on a different endpoint ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@ANTIBOT_URI: "/challenge"@ANTIBOT_URI: "/custom"@' {} \;
|
||||
elif [ "$test" != "deactivated" ] ; then
|
||||
echo "🤖 Running tests with antibot \"$test\" ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_ANTIBOT: ".*"$@USE_ANTIBOT: "'"${test}"'"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🤖 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🤖 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("antibot-bw-1" "antibot-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🤖 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🤖 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🤖 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🤖 Tests are done ! ✅"
|
|
@ -0,0 +1,25 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
# Install firefox and geckodriver
|
||||
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
|
||||
apk add --no-cache firefox
|
||||
|
||||
# Installing geckodriver for firefox...
|
||||
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
|
||||
wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
|
||||
chmod +x /usr/local/bin/geckodriver && \
|
||||
rm geckodriver.tar.gz
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,20 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_AUTH_BASIC: "no"
|
||||
AUTH_BASIC_LOCATION: "sitewide"
|
||||
AUTH_BASIC_USER: "bunkerity"
|
||||
AUTH_BASIC_PASSWORD: "Secr3tP@ssw0rd"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,70 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? AUTH_BASIC settings
|
||||
USE_AUTH_BASIC: "no"
|
||||
AUTH_BASIC_LOCATION: "sitewide"
|
||||
AUTH_BASIC_USER: "bunkerity"
|
||||
AUTH_BASIC_PASSWORD: "Secr3tP@ssw0rd"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,106 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.common.exceptions import NoSuchElementException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code <= 401
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
use_auth_basic = getenv("USE_AUTH_BASIC", "no")
|
||||
auth_basic_location = getenv("AUTH_BASIC_LOCATION", "sitewide")
|
||||
auth_basic_username = getenv("AUTH_BASIC_USER", "bunkerity")
|
||||
auth_basic_password = getenv("AUTH_BASIC_PASSWORD", "Secr3tP@ssw0rd")
|
||||
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
|
||||
if use_auth_basic == "no" or auth_basic_location != "sitewide":
|
||||
print("ℹ️ Navigating to http://www.example.com ...", flush=True)
|
||||
driver.get("http://www.example.com")
|
||||
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//img[@alt='NGINX Logo']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The page is not accessible ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
if use_auth_basic == "no":
|
||||
print("✅ Auth-basic is disabled, as expected ...", flush=True)
|
||||
else:
|
||||
print(
|
||||
f"ℹ️ Trying to access http://www.example.com{auth_basic_location} ...",
|
||||
flush=True,
|
||||
)
|
||||
status_code = get(
|
||||
f"http://www.example.com{auth_basic_location}",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code != 401:
|
||||
print("❌ The page is accessible without auth-basic ...", flush=True)
|
||||
exit(1)
|
||||
print(
|
||||
"✅ Auth-basic is enabled and working in the expected location ...",
|
||||
)
|
||||
else:
|
||||
print(f"ℹ️ Trying to access http://www.example.com ...", flush=True)
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code != 401:
|
||||
print("❌ The page is accessible without auth-basic ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print(
|
||||
f"ℹ️ Trying to access http://{auth_basic_username}:{auth_basic_password}@www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
driver.get(
|
||||
f"http://{auth_basic_username}:{auth_basic_password}@www.example.com"
|
||||
)
|
||||
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//img[@alt='NGINX Logo']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The page is not accessible ...", flush=True)
|
||||
exit(1)
|
||||
print("✅ Auth-basic is enabled and working, as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1,2 @@
|
|||
requests==2.30.0
|
||||
selenium==4.9.1
|
|
@ -0,0 +1,119 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🔐 Building authbasic stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_AUTH_BASIC: "yes"@USE_AUTH_BASIC: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "/auth"@AUTH_BASIC_LOCATION: "sitewide"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_USER: "admin"@AUTH_BASIC_USER: "bunkerity"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_PASSWORD: "password"@AUTH_BASIC_PASSWORD: "Secr3tP\@ssw0rd"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🔐 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🔐 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "sitewide" "location" "user" "password"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🔐 Running tests without authbasic ..."
|
||||
elif [ "$test" = "sitewide" ] ; then
|
||||
echo "🔐 Running tests with sitewide authbasic ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_AUTH_BASIC: "no"@USE_AUTH_BASIC: "yes"@' {} \;
|
||||
elif [ "$test" = "location" ] ; then
|
||||
echo "🔐 Running tests with the location changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "sitewide"@AUTH_BASIC_LOCATION: "/auth"@' {} \;
|
||||
elif [ "$test" = "user" ] ; then
|
||||
echo "🔐 Running tests with the user changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "/auth"@AUTH_BASIC_LOCATION: "sitewide"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_USER: "bunkerity"@AUTH_BASIC_USER: "admin"@' {} \;
|
||||
elif [ "$test" = "password" ] ; then
|
||||
echo "🔐 Running tests with the password changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_PASSWORD: "Secr3tP\@ssw0rd"@AUTH_BASIC_PASSWORD: "password"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🔐 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🔐 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("authbasic-bw-1" "authbasic-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🔐 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🔐 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🔐 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🔐 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,25 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
USE_BAD_BEHAVIOR: "yes"
|
||||
BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"
|
||||
BAD_BEHAVIOR_BAN_TIME: "86400"
|
||||
BAD_BEHAVIOR_THRESHOLD: "10"
|
||||
BAD_BEHAVIOR_COUNT_TIME: "60"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
||||
bw-docker:
|
||||
external: true
|
|
@ -0,0 +1,65 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BAD_BEHAVIOR settings
|
||||
USE_BAD_BEHAVIOR: "yes"
|
||||
BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"
|
||||
BAD_BEHAVIOR_BAN_TIME: "86400"
|
||||
BAD_BEHAVIOR_THRESHOLD: "10"
|
||||
BAD_BEHAVIOR_COUNT_TIME: "60"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,134 @@
|
|||
from contextlib import suppress
|
||||
from datetime import datetime
|
||||
from docker import DockerClient
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_bad_behavior = getenv("USE_BAD_BEHAVIOR", "yes")
|
||||
bad_behavior_status_codes = getenv(
|
||||
"BAD_BEHAVIOR_STATUS_CODES", "400 401 403 404 405 429 444"
|
||||
)
|
||||
bad_behavior_ban_time = getenv("BAD_BEHAVIOR_BAN_TIME", "86400")
|
||||
bad_behavior_threshold = getenv("BAD_BEHAVIOR_THRESHOLD", "10")
|
||||
bad_behavior_count_time = getenv("BAD_BEHAVIOR_COUNT_TIME", "60")
|
||||
|
||||
print(
|
||||
"ℹ️ Sending 15 requests to http://www.example.com/?id=/etc/passwd ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
for _ in range(15):
|
||||
get(
|
||||
"http://www.example.com/?id=/etc/passwd",
|
||||
headers={"Host": "www.example.com"},
|
||||
)
|
||||
|
||||
sleep(1)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
if use_bad_behavior == "no":
|
||||
print("❌ Bad Behavior is enabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_status_codes != "400 401 403 404 405 429 444":
|
||||
print("❌ Bad Behavior's status codes didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_ban_time != "86400":
|
||||
print(
|
||||
"ℹ️ Sleeping for 7s to wait if Bad Behavior's ban time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
sleep(7)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
print("❌ Bad Behavior's ban time didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_threshold != "10":
|
||||
print("❌ Bad Behavior's threshold didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_count_time != "60":
|
||||
print(
|
||||
"ℹ️ Sleeping for 7s to wait if Bad Behavior's count time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
current_time = datetime.now().timestamp()
|
||||
sleep(7)
|
||||
|
||||
print(
|
||||
"ℹ️ Checking BunkerWeb's logs to see if Bad Behavior's count time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
|
||||
docker_client = DockerClient(base_url=docker_host)
|
||||
|
||||
bw_instances = docker_client.containers.list(
|
||||
filters={"label": "bunkerweb.INSTANCE"}
|
||||
)
|
||||
|
||||
if not bw_instances:
|
||||
print("❌ BunkerWeb instance not found ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
bw_instance = bw_instances[0]
|
||||
|
||||
found = False
|
||||
for log in bw_instance.logs(since=current_time).split(b"\n"):
|
||||
if b"decreased counter for IP 192.168.0.3 (0/10)" in log:
|
||||
found = True
|
||||
break
|
||||
|
||||
if not found:
|
||||
print("❌ Bad Behavior's count time didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif (
|
||||
use_bad_behavior == "yes"
|
||||
and bad_behavior_status_codes == "400 401 403 404 405 429 444"
|
||||
and bad_behavior_threshold == "10"
|
||||
):
|
||||
print("❌ Bad Behavior is disabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print("✅ Bad Behavior is working as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1,2 @@
|
|||
requests==2.30.0
|
||||
docker==6.1.2
|
|
@ -0,0 +1,126 @@
|
|||
#!/bin/bash

# End-to-end test runner for the Bad Behavior core plugin.
# Runs the stack once per scenario, flipping one BAD_BEHAVIOR_* setting
# per scenario via sed on the docker-compose files, and tears the stack
# down between runs.

echo "📟 Building badbehavior stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "📟 Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "📟 Build failed ❌"
    exit 1
fi

# manual=1 means cleanup_stack is being called mid-loop (between scenarios),
# so the compose files must NOT be reset to their defaults yet.
# end=1 means all scenarios finished; the final trap call only restores files.
manual=0
end=0
cleanup_stack () {
    exit_code=$?
    # Restore the compose files to their committed defaults when exiting
    # (success or failure), but not on the manual between-scenario calls.
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "no"@USE_BAD_BEHAVIOR: "yes"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "5"@BAD_BEHAVIOR_BAN_TIME: "86400"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "20"@BAD_BEHAVIOR_THRESHOLD: "10"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_COUNT_TIME: "5"@BAD_BEHAVIOR_COUNT_TIME: "60"@' {} \;
        # Clean exit after the last scenario: files restored, stack already down.
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "📟 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📟 Down failed ❌"
        exit 1
    fi

    echo "📟 Cleaning up current stack done ✅"
}

# Cleanup stack on exit
trap cleanup_stack EXIT

# Each scenario assumes the substitutions done by the previous one,
# so the sed pairs below undo the prior change before applying the new one.
for test in "activated" "deactivated" "status_codes" "ban_time" "threshold" "count_time"
do
    if [ "$test" = "activated" ] ; then
        echo "📟 Running tests with badbehavior activated ..."
    elif [ "$test" = "deactivated" ] ; then
        echo "📟 Running tests without badbehavior ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "yes"@USE_BAD_BEHAVIOR: "no"@' {} \;
    elif [ "$test" = "status_codes" ] ; then
        echo "📟 Running tests with badbehavior's 403 status code removed from the list ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "no"@USE_BAD_BEHAVIOR: "yes"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@' {} \;
    elif [ "$test" = "ban_time" ] ; then
        echo "📟 Running tests with badbehavior's ban time changed to 5 seconds ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "86400"@BAD_BEHAVIOR_BAN_TIME: "5"@' {} \;
    elif [ "$test" = "threshold" ] ; then
        echo "📟 Running tests with badbehavior's threshold set to 20 ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "5"@BAD_BEHAVIOR_BAN_TIME: "86400"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "10"@BAD_BEHAVIOR_THRESHOLD: "20"@' {} \;
    elif [ "$test" = "count_time" ] ; then
        echo "📟 Running tests with badbehavior's count time set to 5 seconds ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "20"@BAD_BEHAVIOR_THRESHOLD: "10"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_COUNT_TIME: "60"@BAD_BEHAVIOR_COUNT_TIME: "5"@' {} \;
    fi

    echo "📟 Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "📟 Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy (poll docker healthchecks, up to 120s)
    echo "📟 Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("badbehavior-bw-1" "badbehavior-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "📟 Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "📟 Docker stack is not healthy ❌"
        exit 1
    fi

    # Start tests

    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📟 Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    else
        echo "📟 Test \"$test\" succeeded ✅"
    fi

    # Tear the stack down between scenarios without resetting the compose files.
    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "📟 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
# Test-runner image: installs the Python test dependencies and runs main.py.
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

# Parallelize any source builds, skip the pip cache, and drop the
# requirements file afterwards to keep the image small.
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,14 @@
|
|||
# Fixture API image: serves the static blacklist endpoints with uvicorn.
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

# Parallelize any source builds, skip the pip cache, and drop the
# requirements file afterwards to keep the image small.
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/blacklist_api

COPY main.py .

# Exec-form ENTRYPOINT is not processed by a shell, so arguments must not be
# shell-quoted: the previous "\"*\"" passed the literal 3-character string
# "*" (quotes included) to --forwarded-allow-ips, which matches no proxy.
# A bare * trusts X-Forwarded-* headers from any client, as intended here.
ENTRYPOINT [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--proxy-headers", "--forwarded-allow-ips", "*" ]
|
|
@ -0,0 +1,30 @@
|
|||
"""Static blacklist fixture API.

Every endpoint returns a fixed plain-text list (one entry per line) that the
blacklist plugin tests point their *_URLS settings at.
"""
from fastapi import FastAPI
from fastapi.responses import PlainTextResponse

# Payloads served below, hoisted so the lists are easy to audit in one place.
_IP_LIST = "192.168.0.3\n10.0.0.0/8\n127.0.0.1/32"
_RDNS_LIST = ".example.com\n.example.org\n.bw-services"
_ASN_LIST = "1234\n13335\n5678"
_USER_AGENT_LIST = "BunkerBot\nCensysInspect\nShodanInspect\nZmEu\nmasscan"
_URI_LIST = "/admin\n/login"

app = FastAPI()


@app.get("/ip")
async def ip():
    """Serve the IP / network blacklist."""
    return PlainTextResponse(_IP_LIST)


@app.get("/rdns")
async def rdns():
    """Serve the reverse-DNS suffix blacklist."""
    return PlainTextResponse(_RDNS_LIST)


@app.get("/asn")
async def asn():
    """Serve the AS-number blacklist."""
    return PlainTextResponse(_ASN_LIST)


@app.get("/user_agent")
async def user_agent():
    """Serve the User-Agent blacklist."""
    return PlainTextResponse(_USER_AGENT_LIST)


@app.get("/uri")
async def uri():
    """Serve the URI blacklist."""
    return PlainTextResponse(_URI_LIST)
|
|
@ -0,0 +1,2 @@
|
|||
fastapi==0.95.1
|
||||
uvicorn[standard]==0.22.0
|
|
@ -0,0 +1,9 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
init:
|
||||
build: init
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
volumes:
|
||||
- ./init/output:/output
|
|
@ -0,0 +1,72 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
global-tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:1.0.0.2"
|
||||
networks:
|
||||
bw-global-network:
|
||||
ipv4_address: 1.0.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
||||
bw-global-network:
|
||||
external: true
|
|
@ -0,0 +1,102 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BLACKLIST settings
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
bw-global-network:
|
||||
ipv4_address: 1.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
volumes:
|
||||
- bw-data:/data
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
blacklist-api:
|
||||
build: api
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
volumes:
|
||||
bw-data:
|
||||
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-global-network:
|
||||
name: bw-global-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 1.0.0.0/8
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,14 @@
|
|||
# Init image: downloads the dbip ASN database and writes the stack's AS
# number to /output (consumed by the test runner script).
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

# Parallelize any source builds, skip the pip cache, and drop the
# requirements file afterwards to keep the image small.
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/blacklist_init

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,33 @@
|
|||
"""Look up the AS number of IP 1.0.0.3 in the current db-ip ASN database.

Downloads this month's free dbip-asn-lite mmdb (gzipped) fully into memory,
reads the record for 1.0.0.3, and writes the AS number to /output/ip_asn.txt
for the test runner. Exits 1 if the lookup yields nothing.
"""
from datetime import date
from gzip import GzipFile
from io import BytesIO
from pathlib import Path
from maxminddb import MODE_FD, open_database
from requests import get

# Compute the mmdb URL (db-ip publishes one free dump per month)
mmdb_url = f"https://download.db-ip.com/free/dbip-asn-lite-{date.today().strftime('%Y-%m')}.mmdb.gz"

# Download the mmdb file in memory
print(f"Downloading mmdb file from url {mmdb_url} ...", flush=True)
buffer = BytesIO()
with get(mmdb_url, stream=True) as response:
    response.raise_for_status()
    # filter(None, ...) drops keep-alive (empty) chunks
    buffer.writelines(filter(None, response.iter_content(chunk_size=4 * 1024)))
buffer.seek(0)

# MODE_FD lets maxminddb read from the in-memory, gzip-wrapped file object
with open_database(GzipFile(fileobj=buffer, mode="rb"), mode=MODE_FD) as reader:
    record = reader.get("1.0.0.3")

if not record:
    print(f"❌ Error while reading mmdb file from {mmdb_url}", flush=True)
    exit(1)

print(
    f"✅ ASN for IP 1.0.0.3 is {record['autonomous_system_number']}, saving it to /output/ip_asn.txt",
    flush=True,
)

Path("/output/ip_asn.txt").write_text(str(record["autonomous_system_number"]))
|
|
@ -0,0 +1,2 @@
|
|||
maxminddb==2.3.0
|
||||
requests==2.30.0
|
|
@ -0,0 +1,212 @@
|
|||
"""End-to-end check for the BunkerWeb blacklist plugin.

Waits for the stack to answer, then sends one request that matches every
blacklist rule at once (client IP 192.168.0.3 / 1.0.0.3, rDNS .bw-services,
the stack's ASN, User-Agent BunkerBot, URI /admin) and verifies that the
403 / not-403 outcome is consistent with the BLACKLIST_* settings injected
through the environment. Exits 1 on any mismatch.
"""
from contextlib import suppress
from os import getenv
from requests import get
from requests.exceptions import RequestException
from time import sleep
from traceback import format_exc

try:
    # Wait for the service to come up; a 403 also counts as "ready" since
    # the blacklist itself may be rejecting us. Up to 11 attempts, 5s apart.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "http://www.example.com", headers={"Host": "www.example.com"}
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400 or status_code == 403

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    # Settings under test, mirroring the docker-compose environment.
    use_blacklist = getenv("USE_BLACKLIST", "yes") == "yes"

    blacklist_ip = getenv("BLACKLIST_IP", "")
    blacklist_ip_urls = getenv("BLACKLIST_IP_URLS", "")
    blacklist_rdns_global = getenv("BLACKLIST_RDNS_GLOBAL", "yes") == "yes"
    blacklist_rdns = getenv("BLACKLIST_RDNS", "")
    blacklist_rdns_urls = getenv("BLACKLIST_RDNS_URLS", "")
    blacklist_asn = getenv("BLACKLIST_ASN", "")
    blacklist_asn_urls = getenv("BLACKLIST_ASN_URLS", "")
    blacklist_user_agent = getenv("BLACKLIST_USER_AGENT", "")
    blacklist_user_agent_urls = getenv("BLACKLIST_USER_AGENT_URLS", "")
    blacklist_uri = getenv("BLACKLIST_URI", "")
    blacklist_uri_urls = getenv("BLACKLIST_URI_URLS", "")

    blacklist_ignore_ip = getenv("BLACKLIST_IGNORE_IP", "")
    blacklist_ignore_ip_urls = getenv("BLACKLIST_IGNORE_IP_URLS", "")
    blacklist_ignore_rdns = getenv("BLACKLIST_IGNORE_RDNS", "")
    blacklist_ignore_rdns_urls = getenv("BLACKLIST_IGNORE_RDNS_URLS", "")
    blacklist_ignore_asn = getenv("BLACKLIST_IGNORE_ASN", "")
    blacklist_ignore_asn_urls = getenv("BLACKLIST_IGNORE_ASN_URLS", "")
    blacklist_ignore_user_agent = getenv("BLACKLIST_IGNORE_USER_AGENT", "")
    blacklist_ignore_user_agent_urls = getenv("BLACKLIST_IGNORE_USER_AGENT_URLS", "")
    blacklist_ignore_uri = getenv("BLACKLIST_IGNORE_URI", "")
    blacklist_ignore_uri_urls = getenv("BLACKLIST_IGNORE_URI_URLS", "")

    print(
        "ℹ️ Sending a request to http://www.example.com/admin with User-Agent: BunkerBot ...",
        flush=True,
    )

    # One request that trips every rule kind at once.
    status_code = get(
        "http://www.example.com/admin",  # fix: was a placeholder-less f-string
        headers={"Host": "www.example.com", "User-Agent": "BunkerBot"},
    ).status_code

    if status_code == 403:
        # Rejected: make sure no "ignore"/disable setting should have let us through.
        if not use_blacklist:
            print(
                "❌ The request was rejected, but the blacklist is disabled, exiting ...",
                flush=True,  # fix: flush added for consistency with the other prints
            )
            exit(1)
        elif blacklist_rdns_global and (
            blacklist_rdns != "" or blacklist_rdns_urls != ""
        ):
            print(
                "❌ Blacklist's RDNS global didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_ip != "":
            print(
                "❌ Blacklist's ignore IP didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_ip_urls != "":
            print(
                "❌ Blacklist's ignore IP urls didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_rdns != "":
            print(
                "❌ Blacklist's ignore RDNS didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_rdns_urls != "":
            print(
                "❌ Blacklist's ignore RDNS urls didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_asn != "":
            print(
                "❌ Blacklist's ignore ASN didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_asn_urls != "":
            print(
                "❌ Blacklist's ignore ASN urls didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_user_agent != "":
            print(
                "❌ Blacklist's ignore user agent didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_user_agent_urls != "":
            print(
                "❌ Blacklist's ignore user agent urls didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_uri != "":
            print(
                "❌ Blacklist's ignore URI didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
        elif blacklist_ignore_uri_urls != "":
            print(
                "❌ Blacklist's ignore URI urls didn't work as expected, exiting ...",
                flush=True,
            )
            exit(1)
    # Not rejected: make sure no active ban rule should have caught us.
    elif blacklist_ip != "" and not any(
        [blacklist_ignore_ip, blacklist_ignore_ip_urls, not use_blacklist]
    ):
        print("❌ Blacklist's IP didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_ip_urls != "":
        print("❌ Blacklist's IP urls didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_rdns != "" and not any(
        [
            blacklist_ignore_rdns,
            blacklist_ignore_rdns_urls,
            blacklist_rdns_global,
        ]
    ):
        print("❌ Blacklist's RDNS didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_rdns_urls != "" and blacklist_rdns_global:
        print(
            "❌ Blacklist's RDNS urls didn't work as expected, exiting ...", flush=True
        )
        exit(1)
    elif blacklist_asn != "" and not any(
        [blacklist_ignore_asn, blacklist_ignore_asn_urls]
    ):
        print("❌ Blacklist's ASN didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_asn_urls != "":
        print("❌ Blacklist's ASN urls didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_user_agent != "" and not any(
        [blacklist_ignore_user_agent, blacklist_ignore_user_agent_urls]
    ):
        print(
            "❌ Blacklist's User Agent didn't work as expected, exiting ...", flush=True
        )
        exit(1)
    elif blacklist_user_agent_urls != "":
        print(
            "❌ Blacklist's User Agent urls didn't work as expected, exiting ...",
            flush=True,
        )
        exit(1)
    elif blacklist_uri != "" and not any(
        [blacklist_ignore_uri, blacklist_ignore_uri_urls]
    ):
        print("❌ Blacklist's URI didn't work as expected, exiting ...", flush=True)
        exit(1)
    elif blacklist_uri_urls != "":
        print("❌ Blacklist's URI urls didn't work as expected, exiting ...", flush=True)
        exit(1)
    # Sanity check: with the plugin enabled and any rule configured, the
    # request above must have been rejected by one of the branches before.
    elif use_blacklist and not any(
        [
            blacklist_ip,
            blacklist_ip_urls,
            blacklist_rdns,
            blacklist_rdns_urls,
            blacklist_asn,
            blacklist_asn_urls,
            blacklist_user_agent,
            blacklist_user_agent_urls,
            blacklist_uri,
            blacklist_uri_urls,
            blacklist_ignore_ip,
            blacklist_ignore_ip_urls,
            blacklist_ignore_rdns,
            blacklist_ignore_rdns_urls,
            blacklist_ignore_asn,
            blacklist_ignore_asn_urls,
            blacklist_ignore_user_agent,
            blacklist_ignore_user_agent_urls,
            blacklist_ignore_uri,
            blacklist_ignore_uri_urls,
        ]
    ):
        print("❌ Blacklist is disabled, it shouldn't be ...", flush=True)
        exit(1)

    print("✅ Blacklist is working as expected ...", flush=True)
except SystemExit:
    exit(1)
except Exception:  # fix: was a bare except; don't swallow KeyboardInterrupt
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,258 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🏴 Building blacklist stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Building custom api image ..."
|
||||
docker compose build blacklist-api
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Building tests images ..."
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
as_number=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
rm -rf init/output
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "no"@USE_BLACKLIST: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: "0.0.0.0/0"@BLACKLIST_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: "192.168.0.3"@BLACKLIST_IGNORE_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IGNORE_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "no"@BLACKLIST_RDNS_GLOBAL: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ".bw-services"@BLACKLIST_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ".bw-services"@BLACKLIST_IGNORE_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_IGNORE_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: "[0-9]*"@BLACKLIST_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: "[0-9]*"@BLACKLIST_IGNORE_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_IGNORE_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: "BunkerBot"@BLACKLIST_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@BLACKLIST_IGNORE_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: "/admin"@BLACKLIST_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: "/admin"@BLACKLIST_IGNORE_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_URI_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_IGNORE_URI_URLS: ""@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🏴 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
echo "🏴 Initializing workspace ..."
|
||||
rm -rf init/output
|
||||
mkdir -p init/output
|
||||
docker compose -f docker-compose.init.yml up --build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
elif ! [[ -f "init/output/ip_asn.txt" ]]; then
|
||||
echo "🏴 ip_asn.txt not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
as_number=$(cat init/output/ip_asn.txt)
|
||||
|
||||
if [[ $as_number = "" ]]; then
|
||||
echo "🏴 AS number not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf init/output
|
||||
|
||||
for test in "ip" "deactivated" "ignore_ip" "ignore_ip_urls" "ip_urls" "rdns" "rdns_global" "ignore_rdns" "ignore_rdns_urls" "rdns_urls" "asn" "ignore_asn" "ignore_asn_urls" "asn_urls" "user_agent" "ignore_user_agent" "ignore_user_agent_urls" "user_agent_urls" "uri" "ignore_uri" "ignore_uri_urls" "uri_urls"
|
||||
do
|
||||
if [ "$test" = "ip" ] ; then
|
||||
echo "🏴 Running tests with the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: ""@BLACKLIST_IP: "0.0.0.0/0"@' {} \;
|
||||
elif [ "$test" = "deactivated" ] ; then
|
||||
echo "🏴 Running tests when deactivating the blacklist ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "yes"@USE_BLACKLIST: "no"@' {} \;
|
||||
elif [ "$test" = "ignore_ip" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip set to 192.168.0.3 ..."
|
||||
echo "ℹ️ Keeping the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "no"@USE_BLACKLIST: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: ""@BLACKLIST_IGNORE_IP: "192.168.0.3"@' {} \;
|
||||
elif [ "$test" = "ignore_ip_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip_urls set to http://blacklist-api:8080/ip ..."
|
||||
echo "ℹ️ Keeping the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: "192.168.0.3"@BLACKLIST_IGNORE_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: ""@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@' {} \;
|
||||
elif [ "$test" = "ip_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ip url set to http://blacklist-api:8080/ip ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IGNORE_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: "0.0.0.0/0"@BLACKLIST_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: ""@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@' {} \;
|
||||
elif [ "$test" = "rdns" ] ; then
|
||||
echo "🏴 Running tests with blacklist's rdns set to .bw-services ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ""@BLACKLIST_RDNS: ".bw-services"@' {} \;
|
||||
elif [ "$test" = "rdns_global" ] ; then
|
||||
echo "🏴 Running tests when blacklist's rdns also scans local ip addresses ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "yes"@BLACKLIST_RDNS_GLOBAL: "no"@' {} \;
|
||||
elif [ "$test" = "ignore_rdns" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_rdns set to .bw-services ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
echo "ℹ️ Keeping the rdns .bw-services in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ""@BLACKLIST_IGNORE_RDNS: ".bw-services"@' {} \;
|
||||
elif [ "$test" = "ignore_rdns_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_rdns_urls set to http://blacklist-api:8080/rdns ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
echo "ℹ️ Keeping the rdns .bw-services in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ".bw-services"@BLACKLIST_IGNORE_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: ""@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@' {} \;
|
||||
elif [ "$test" = "rdns_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's rdns url set to http://blacklist-api:8080/rdns ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_IGNORE_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ".bw-services"@BLACKLIST_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: ""@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@' {} \;
|
||||
elif [ "$test" = "asn" ] ; then
|
||||
echo "🏴 Running tests with blacklist's asn set to $as_number ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "no"@BLACKLIST_RDNS_GLOBAL: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: ""@BLACKLIST_ASN: "'"$as_number"'"@' {} \;
|
||||
elif [ "$test" = "ignore_asn" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_asn set to $as_number ..."
|
||||
echo "ℹ️ Keeping the asn $as_number in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: ""@BLACKLIST_IGNORE_ASN: "'"$as_number"'"@' {} \;
|
||||
elif [ "$test" = "ignore_asn_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_asn_urls set to http://blacklist-api:8080/asn ..."
|
||||
echo "ℹ️ Keeping the asn $as_number in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: "'"$as_number"'"@BLACKLIST_IGNORE_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: ""@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@' {} \;
|
||||
elif [ "$test" = "asn_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's asn url set to http://blacklist-api:8080/asn ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_IGNORE_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: "'"$as_number"'"@BLACKLIST_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: ""@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@' {} \;
|
||||
elif [ "$test" = "user_agent" ] ; then
|
||||
echo "🏴 Running tests with blacklist's user_agent set to BunkerBot ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: ""@BLACKLIST_USER_AGENT: "BunkerBot"@' {} \;
|
||||
elif [ "$test" = "ignore_user_agent" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_user_agent set to BunkerBot ..."
|
||||
echo "ℹ️ Keeping the user_agent BunkerBot in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: ""@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@' {} \;
|
||||
elif [ "$test" = "ignore_user_agent_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_user_agent_urls set to http://blacklist-api:8080/user_agent ..."
|
||||
echo "ℹ️ Keeping the user_agent BunkerBot in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@BLACKLIST_IGNORE_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@' {} \;
|
||||
elif [ "$test" = "user_agent_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's user_agent url set to http://blacklist-api:8080/user_agent ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: "BunkerBot"@BLACKLIST_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: ""@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@' {} \;
|
||||
elif [ "$test" = "uri" ] ; then
|
||||
echo "🏴 Running tests with blacklist's uri set to /admin ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: ""@BLACKLIST_URI: "/admin"@' {} \;
|
||||
elif [ "$test" = "ignore_uri" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_uri set to /admin ..."
|
||||
echo "ℹ️ Keeping the uri /admin in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: ""@BLACKLIST_IGNORE_URI: "/admin"@' {} \;
|
||||
elif [ "$test" = "ignore_uri_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip_urls set to http://blacklist-api:8080/uri ..."
|
||||
echo "ℹ️ Keeping the uri /admin in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: "/admin"@BLACKLIST_IGNORE_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: ""@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@' {} \;
|
||||
elif [ "$test" = "uri_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's uri url set to http://blacklist-api:8080/uri ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_IGNORE_URI_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: "/admin"@BLACKLIST_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI_URLS: ""@BLACKLIST_URI_URLS: "http://blacklist-api:8080/uri"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🏴 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🏴 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("blacklist-bw-1" "blacklist-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🏴 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🏴 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
if [[ "$test" = "asn" || "$test" = "ignore_asn" || "$test" = "ignore_asn_urls" || "$test" = "asn_urls" ]] ; then
|
||||
docker compose -f docker-compose.test.yml up global-tests --abort-on-container-exit --exit-code-from global-tests 2>/dev/null
|
||||
else
|
||||
docker compose -f docker-compose.test.yml up tests --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
fi
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb, BunkerWeb Scheduler and Custom API logs ..."
|
||||
docker compose logs bw bw-scheduler blacklist-api
|
||||
exit 1
|
||||
else
|
||||
echo "🏴 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🏴 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,17 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BROTLI: "no"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,67 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BROTLI settings
|
||||
USE_BROTLI: "no"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,62 @@
|
|||
"""End-to-end test: verify BunkerWeb's Brotli compression toggle (USE_BROTLI)."""

from contextlib import suppress
from os import getenv
from requests import RequestException, get, head
from traceback import format_exc
from time import sleep


try:
    # Wait for the service behind BunkerWeb to answer with a non-error status.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "http://www.example.com", headers={"Host": "www.example.com"}
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    # The runner script toggles this env var between the two test passes.
    use_brotli = getenv("USE_BROTLI", "no") == "yes"

    print(
        "ℹ️ Sending a HEAD request to http://www.example.com ...",
        flush=True,
    )

    # Advertise Brotli support so the server may negotiate it.
    response = head(
        "http://www.example.com",
        headers={"Host": "www.example.com", "Accept-Encoding": "br"},
    )
    response.raise_for_status()

    # The Content-Encoding header must match the USE_BROTLI setting exactly.
    if not use_brotli and response.headers.get("Content-Encoding", "").lower() == "br":
        print(
            f"❌ Content-Encoding header is present even if Brotli is deactivated, exiting ...\nheaders: {response.headers}"
        )
        exit(1)
    elif use_brotli and response.headers.get("Content-Encoding", "").lower() != "br":
        print(
            f"❌ Content-Encoding header is not present or with the wrong value even if Brotli is activated, exiting ...\nheaders: {response.headers}"
        )
        exit(1)

    print("✅ Brotli is working as expected ...", flush=True)
except SystemExit as e:
    # Propagate the intended exit code (matches the bunkernet test script).
    exit(e.code)
except:
    # Deliberate catch-all: any unexpected error fails the test with a traceback.
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,106 @@
|
|||
#!/bin/bash
# Runs the Brotli plugin tests: one pass with USE_BROTLI deactivated, one activated.

echo "📦 Building brotli stack ..."

# Starting stack
docker compose pull bw-docker app1
if [ $? -ne 0 ] ; then
    echo "📦 Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "📦 Build failed ❌"
    exit 1
fi

manual=0
end=0
# Tears the stack down and restores the compose files to USE_BROTLI: "no".
cleanup_stack () {
    exit_code=$?
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ "$manual" = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BROTLI: "yes"@USE_BROTLI: "no"@' {} \;
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "📦 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📦 Down failed ❌"
        exit 1
    fi

    echo "📦 Cleaning up current stack done ✅"
}

# Cleanup stack on exit
trap cleanup_stack EXIT

for test in "deactivated" "activated"
do
    if [ "$test" = "deactivated" ] ; then
        echo "📦 Running tests without brotli ..."
    elif [ "$test" = "activated" ] ; then
        echo "📦 Running tests with brotli ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BROTLI: "no"@USE_BROTLI: "yes"@' {} \;
    fi

    echo "📦 Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "📦 Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy
    echo "📦 Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("brotli-bw-1" "brotli-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            # Quoted to avoid word-splitting should the name ever contain spaces.
            check="$(docker inspect --format "{{json .State.Health }}" "$container" | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "📦 Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "📦 Docker stack is not healthy ❌"
        exit 1
    fi

    # Start tests

    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📦 Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    else
        echo "📦 Test \"$test\" succeeded ✅"
    fi

    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "📦 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/blacklist_api
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--proxy-headers", "--forwarded-allow-ips", "\"*\"" ]
|
|
@ -0,0 +1,46 @@
|
|||
"""Mock BunkerNet API used by the bunkernet plugin tests.

Records whether an instance registered and how many reports it sent, so the
test driver can query /instance_id and /report_num afterwards.
"""

from uuid import uuid4
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse


app = FastAPI()
# Mutable module-level state shared between handlers.
instance_id = None  # set by /register, read back via /instance_id
report_num = 0  # incremented by /report, read back via /report_num


@app.get("/ping")
async def ping(_: Request):
    """Liveness probe."""
    return JSONResponse(status_code=200, content={"result": "ok", "data": "pong"})


@app.post("/register")
async def register(_: Request):
    """Register an instance and hand back a fresh instance id."""
    global instance_id
    instance_id = str(uuid4())
    return JSONResponse(status_code=200, content={"result": "ok", "data": instance_id})


@app.post("/report")
async def report(_: Request):
    """Acknowledge a report and bump the counter."""
    global report_num
    report_num += 1
    return JSONResponse(
        status_code=200, content={"result": "ok", "data": "Report acknowledged."}
    )


@app.get("/db")
async def db(_: Request):
    """Return an empty database dump."""
    return JSONResponse(status_code=200, content={"result": "ok", "data": []})


@app.get("/instance_id")
async def get_instance_id(_: Request):
    """Expose the last registered instance id (None if nothing registered)."""
    # Read-only access: no ``global`` declaration needed.
    return JSONResponse(status_code=200, content={"result": "ok", "data": instance_id})


@app.get("/report_num")
async def get_report_num(_: Request):
    """Expose the number of reports received so far."""
    return JSONResponse(status_code=200, content={"result": "ok", "data": report_num})
|
|
@ -0,0 +1,2 @@
|
|||
fastapi==0.95.1
|
||||
uvicorn[standard]==0.22.0
|
|
@ -0,0 +1,18 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BUNKERNET: "yes"
|
||||
BUNKERNET_SERVER: "http://bunkernet-api:8080"
|
||||
extra_hosts:
|
||||
- "www.example.com:1.0.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,68 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BUNKERNET settings
|
||||
USE_BUNKERNET: "yes"
|
||||
BUNKERNET_SERVER: "http://bunkernet-api:8080"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
bunkernet-api:
|
||||
build: api
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 1.0.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,80 @@
|
|||
"""End-to-end test: verify the BunkerNet plugin registers and reports attacks."""

from contextlib import suppress
from os import getenv
from requests import get
from requests.exceptions import RequestException
from time import sleep
from traceback import format_exc

try:
    # Wait for the service behind BunkerWeb to answer with a non-error status.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "http://www.example.com", headers={"Host": "www.example.com"}
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    use_bunkernet = getenv("USE_BUNKERNET", "yes") == "yes"
    bunkernet_server = getenv("BUNKERNET_SERVER")

    if not bunkernet_server:
        print("❌ BunkerNet server not specified, exiting ...", flush=True)
        exit(1)

    # Ask the mock API whether BunkerWeb registered itself.
    instance_id = get(f"{bunkernet_server}/instance_id").json()["data"]

    if use_bunkernet and not instance_id:
        print("❌ BunkerNet plugin did not register, exiting ...", flush=True)
        exit(1)
    elif not use_bunkernet and instance_id:
        print("❌ BunkerNet plugin registered but it shouldn't, exiting ...", flush=True)
        exit(1)
    elif not use_bunkernet and not instance_id:
        print("✅ BunkerNet plugin is disabled and not registered ...", flush=True)
        exit(0)

    print(
        "ℹ️ Sending a request to http://www.example.com/?id=/etc/passwd ...", flush=True
    )

    # Malicious-looking request that BunkerWeb should block and report.
    status_code = get(
        "http://www.example.com/?id=/etc/passwd",
        headers={"Host": "www.example.com"},
    ).status_code

    print(f"ℹ️ Status code: {status_code}", flush=True)

    if status_code != 403:
        print("❌ The request was not blocked, exiting ...", flush=True)
        exit(1)

    # Give the plugin a moment to push its report to the mock API.
    sleep(2)

    report_num = get(f"{bunkernet_server}/report_num").json()["data"]

    if report_num < 1:
        print("❌ The report was not sent, exiting ...", flush=True)
        exit(1)

    print("✅ BunkerNet is working as expected ...", flush=True)
except SystemExit as e:
    # Propagate the intended exit code (the disabled case exits 0).
    exit(e.code)
except:
    # Deliberate catch-all: any unexpected error fails the test with a traceback.
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,115 @@
|
|||
#!/bin/bash
# Runs the BunkerNet plugin tests: one pass with the plugin activated, one deactivated.

echo "🕸️ Building bunkernet stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "🕸️ Pull failed ❌"
    exit 1
fi

echo "🕸️ Building custom api image ..."
docker compose build bunkernet-api
if [ $? -ne 0 ] ; then
    echo "🕸️ Build failed ❌"
    exit 1
fi

echo "🕸️ Building tests images ..."
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "🕸️ Build failed ❌"
    exit 1
fi

manual=0
end=0
# Tears the stack down and restores the compose files to USE_BUNKERNET: "yes".
cleanup_stack () {
    exit_code=$?
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ "$manual" = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BUNKERNET: "no"@USE_BUNKERNET: "yes"@' {} \;
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "🕸️ Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🕸️ Down failed ❌"
        exit 1
    fi

    echo "🕸️ Cleaning up current stack done ✅"
}

# Cleanup stack on exit
trap cleanup_stack EXIT

for test in "activated" "deactivated"
do
    if [ "$test" = "activated" ] ; then
        echo "🕸️ Running tests with bunkernet activated ..."
    elif [ "$test" = "deactivated" ] ; then
        echo "🕸️ Running tests without bunkernet ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BUNKERNET: "yes"@USE_BUNKERNET: "no"@' {} \;
    fi

    echo "🕸️ Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "🕸️ Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy
    echo "🕸️ Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("bunkernet-bw-1" "bunkernet-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            # Quoted to avoid word-splitting should the name ever contain spaces.
            check="$(docker inspect --format "{{json .State.Health }}" "$container" | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "🕸️ Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "🕸️ Docker stack is not healthy ❌"
        exit 1
    fi

    # Start tests

    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🕸️ Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb, BunkerWeb Scheduler and Custom API logs ..."
        docker compose logs bw bw-scheduler bunkernet-api
        exit 1
    else
        echo "🕸️ Test \"$test\" succeeded ✅"
    fi

    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "🕸️ Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,20 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_CLIENT_CACHE: "no"
|
||||
CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
|
||||
CLIENT_CACHE_ETAG: "yes"
|
||||
CLIENT_CACHE_CONTROL: "public, max-age=15552000"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,65 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
ports:
|
||||
- 80:80
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./image.png:/var/www/html/image.png
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? CLIENT_CACHE settings
|
||||
USE_CLIENT_CACHE: "no"
|
||||
CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
|
||||
CLIENT_CACHE_ETAG: "yes"
|
||||
CLIENT_CACHE_CONTROL: "public, max-age=15552000"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
Binary file not shown.
After Width: | Height: | Size: 30 KiB |
|
@ -0,0 +1,89 @@
|
|||
"""End-to-end test: verify BunkerWeb's client-cache settings (USE_CLIENT_CACHE,
CLIENT_CACHE_EXTENSIONS, CLIENT_CACHE_ETAG, CLIENT_CACHE_CONTROL)."""

from contextlib import suppress
from os import getenv
from requests import RequestException, get
from traceback import format_exc
from time import sleep


try:
    # Wait for the static asset behind BunkerWeb to answer with a non-error status.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "http://www.example.com/image.png", headers={"Host": "www.example.com"}
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    use_client_cache = getenv("USE_CLIENT_CACHE", "no") == "yes"
    # True while the extension list still contains "png" (its default value);
    # the runner script swaps in a list without png for one of the passes.
    default_cache_extensions = (
        getenv(
            "CLIENT_CACHE_EXTENSIONS",
            "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2",
        )
        == "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
    )
    client_cache_etag = getenv("CLIENT_CACHE_ETAG", "yes") == "yes"
    client_cache_control = getenv("CLIENT_CACHE_CONTROL", "public, max-age=15552000")

    print(
        "ℹ️ Sending a request to http://www.example.com/image.png ...",
        flush=True,
    )

    response = get(
        "http://www.example.com/image.png", headers={"Host": "www.example.com"}
    )
    response.raise_for_status()

    if not use_client_cache:
        if "Cache-Control" in response.headers:
            print(
                f"❌ Cache-Control header is present even if Client cache is deactivated, exiting ...\nheaders: {response.headers}"
            )
            exit(1)
    else:
        if "Cache-Control" not in response.headers and default_cache_extensions:
            print(
                f"❌ Cache-Control header is not present even if Client cache is activated, exiting ...\nheaders: {response.headers}"
            )
            exit(1)
        elif not default_cache_extensions and "Cache-Control" in response.headers:
            print(
                f"❌ Cache-Control header is present even if the png extension is not in the list of extensions, exiting ...\nheaders: {response.headers}",
                flush=True,
            )
            # Bug fix: this failure branch previously fell through without
            # exiting, letting a failed check report success.
            exit(1)
        elif not client_cache_etag and "ETag" in response.headers:
            print(
                f"❌ ETag header is present even if Client cache ETag is deactivated, exiting ...\nheaders: {response.headers}"
            )
            exit(1)
        elif default_cache_extensions and client_cache_control != response.headers.get(
            "Cache-Control"
        ):
            print(
                f"❌ Cache-Control header is not equal to the expected value, exiting ...\nheaders: {response.headers}"
            )
            exit(1)

    print("✅ Client cache is working as expected ...", flush=True)
except SystemExit:
    exit(1)
except:
    # Deliberate catch-all: any unexpected error fails the test with a traceback.
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,120 @@
|
|||
#!/bin/bash

# Integration-test runner for the BunkerWeb "clientcache" core plugin.
# Builds the test stack, then (below) cycles through each setting variation.

echo "📝 Building clientcache stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "📝 Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "📝 Build failed ❌"
    exit 1
fi

# manual=1 while cleanup_stack is invoked explicitly between tests
# (suppresses the sed restore); end=1 once all tests have finished.
manual=0
end=0
|
||||
# Tear down the compose stack and, on final exit (trap EXIT) or failure,
# restore the docker-compose files to their default clientcache settings.
# Relies on the globals: manual (explicit mid-run call) and end (all tests done).
cleanup_stack () {
    exit_code=$?
    # Restore defaults only on real exit paths, never on the manual
    # between-tests invocation (the loop re-applies settings itself).
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CLIENT_CACHE: "yes"@USE_CLIENT_CACHE: "no"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "no"@CLIENT_CACHE_ETAG: "yes"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_CONTROL: "public, max-age=3600"@CLIENT_CACHE_CONTROL: "public, max-age=15552000"@' {} \;
        # Successful final exit: files restored, stack already cleaned by the loop.
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "📝 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📝 Down failed ❌"
        exit 1
    fi

    echo "📝 Cleaning up current stack done ✅"
}
|
||||
|
||||
# Cleanup stack on exit
trap cleanup_stack EXIT

# Each iteration mutates the docker-compose files with sed to enable exactly
# one clientcache setting variation, then runs the dockerized test suite.
for test in "deactivated" "activated" "cache_extensions" "cache_etag" "cache_control"
do
    if [ "$test" = "deactivated" ] ; then
        echo "📝 Running tests without clientcache ..."
    elif [ "$test" = "activated" ] ; then
        echo "📝 Running tests with clientcache ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CLIENT_CACHE: "no"@USE_CLIENT_CACHE: "yes"@' {} \;
    elif [ "$test" = "cache_extensions" ] ; then
        echo "📝 Running tests when removing png from the cache extensions ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
    elif [ "$test" = "cache_etag" ] ; then
        echo "📝 Running tests when deactivating the etag ..."
        # Restore the extensions list before toggling the ETag setting.
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "yes"@CLIENT_CACHE_ETAG: "no"@' {} \;
    elif [ "$test" = "cache_control" ] ; then
        # Typo fixed: "whith" -> "with"
        echo "📝 Running tests with clientcache control set to public, max-age=3600 ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "no"@CLIENT_CACHE_ETAG: "yes"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_CONTROL: "public, max-age=15552000"@CLIENT_CACHE_CONTROL: "public, max-age=3600"@' {} \;
    fi

    echo "📝 Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "📝 Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy (poll docker healthchecks, up to 120s)
    echo "📝 Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("clientcache-bw-1" "clientcache-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            # $container quoted for robustness against word splitting
            check="$(docker inspect --format "{{json .State.Health }}" "$container" | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "📝 Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "📝 Docker stack is not healthy ❌"
        exit 1
    fi

    # Start tests

    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "📝 Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    else
        echo "📝 Test \"$test\" succeeded ✅"
    fi

    # Explicit cleanup between tests; manual=1 skips the sed restore in cleanup_stack.
    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "📝 Tests are done ! ✅"
|
|
@ -0,0 +1,25 @@
|
|||
# Test-runner image for the CORS plugin tests: Python + headless Firefox
# driven by Selenium (geckodriver).
FROM python:3.11.3-alpine

# Install firefox and geckodriver
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
    apk add --no-cache firefox

# Installing geckodriver for firefox...
# NOTE(review): version is scraped from the GitHub "latest" redirect at build
# time, so builds are not pinned/reproducible — confirm this is intended.
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
    wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
    tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
    chmod +x /usr/local/bin/geckodriver && \
    rm geckodriver.tar.gz

WORKDIR /tmp

COPY requirements.txt .

# Install python deps then drop the requirements file from the layer
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,23 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_CORS: "no"
|
||||
CORS_ALLOW_ORIGIN: "*"
|
||||
CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"
|
||||
CORS_MAX_AGE: "86400"
|
||||
CORS_ALLOW_CREDENTIALS: "no"
|
||||
CORS_ALLOW_METHODS: "GET, POST, OPTIONS"
|
||||
CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,69 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
HTTPS_PORT: "443"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
GENERATE_SELF_SIGNED_SSL: "yes"
|
||||
ALLOWED_METHODS: "GET|POST|HEAD|OPTIONS"
|
||||
|
||||
# ? CORS settings
|
||||
USE_CORS: "no"
|
||||
CORS_ALLOW_ORIGIN: "*"
|
||||
CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"
|
||||
CORS_MAX_AGE: "86400"
|
||||
CORS_ALLOW_CREDENTIALS: "no"
|
||||
CORS_ALLOW_METHODS: "GET, POST, OPTIONS"
|
||||
CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,220 @@
|
|||
"""Integration test for the BunkerWeb CORS plugin.

Reads the expected CORS settings from the environment (mirroring the
docker-compose variations applied by tests.sh), then checks the relevant
response headers via a simple/HEAD request, a preflight OPTIONS request, or a
cross-origin XHR driven through headless Firefox. Exits 0 on success, 1 on
any mismatch.
"""
from contextlib import suppress
from os import getenv
from requests import RequestException, get, head, options
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import JavascriptException
from traceback import format_exc
from time import sleep


try:
    # Wait (up to ~10 retries of 5s) until the service answers with a
    # non-4xx/5xx status; 5xx aborts immediately.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "https://www.example.com",
                headers={"Host": "www.example.com"},
                verify=False,
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    firefox_options = Options()
    firefox_options.add_argument("--headless")

    # Expected values for this run — tests.sh changes exactly one of them
    # away from its default per iteration.
    use_cors = getenv("USE_CORS", "no")
    cors_allow_origin = getenv("CORS_ALLOW_ORIGIN", "*")
    cors_expose_headers = getenv("CORS_EXPOSE_HEADERS", "Content-Length,Content-Range")
    cors_max_age = getenv("CORS_MAX_AGE", "86400")
    cors_allow_credentials = getenv("CORS_ALLOW_CREDENTIALS", "no") == "yes"
    cors_allow_methods = getenv("CORS_ALLOW_METHODS", "GET, POST, OPTIONS")
    cors_allow_headers = getenv(
        "CORS_ALLOW_HEADERS",
        "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range",
    )

    # Branch 1: a "simple response" setting (allow-origin / expose-headers)
    # differs from its default -> check via a HEAD request. The elif chain
    # order matters: only the setting changed for this run is validated.
    if any(
        [
            cors_allow_origin != "*",
            cors_expose_headers != "Content-Length,Content-Range",
        ]
    ):
        print(
            "ℹ️ Sending a HEAD request to https://www.example.com ...",
            flush=True,
        )

        response = head(
            "https://www.example.com", headers={"Host": "www.example.com"}, verify=False
        )
        response.raise_for_status()

        # Preflight-only headers must never appear on a plain response.
        if any(
            header in response.headers
            for header in (
                "Access-Control-Max-Age",
                "Access-Control-Allow-Credentials",
                "Access-Control-Allow-Methods",
                "Access-Control-Allow-Headers",
            )
        ):
            print(
                f"❌ One of the preflight request headers is present in the response headers, it should not be ...\nheaders: {response.headers}",
            )
            exit(1)
        elif cors_allow_origin != response.headers.get("Access-Control-Allow-Origin"):
            print(
                f"❌ The Access-Control-Allow-Origin header is set to {response.headers.get('Access-Control-Allow-Origin', 'missing')}, it should be {cors_allow_origin} ...\nheaders: {response.headers}",
                flush=True,
            )
            exit(1)
        elif cors_allow_origin != "*":
            print(
                f"✅ The Access-Control-Allow-Origin header is set to {cors_allow_origin} ...",
                flush=True,
            )
        elif cors_expose_headers != response.headers.get(
            "Access-Control-Expose-Headers"
        ):
            print(
                f"❌ The Access-Control-Expose-Headers header is set to {response.headers.get('Access-Control-Expose-Headers', 'missing')}, it should be {cors_expose_headers} ...\nheaders: {response.headers}",
                flush=True,
            )
            exit(1)
        elif cors_expose_headers != "Content-Length,Content-Range":
            print(
                f"✅ The Access-Control-Expose-Headers header is set to {cors_expose_headers} ...",
                flush=True,
            )

        exit(0)
    # Branch 2: a preflight setting differs from its default -> check via an
    # OPTIONS (preflight) request.
    elif any(
        [
            cors_max_age != "86400",
            cors_allow_credentials,
            cors_allow_methods != "GET, POST, OPTIONS",
            cors_allow_headers
            != "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range",
        ]
    ):
        print(
            "ℹ️ Sending a preflight request to https://www.example.com ...",
            flush=True,
        )

        response = options(
            "https://www.example.com", headers={"Host": "www.example.com"}, verify=False
        )
        response.raise_for_status()

        if (
            not cors_allow_credentials
            and "Access-Control-Allow-Credentials" in response.headers
        ):
            print(
                f"❌ The Access-Control-Allow-Credentials header is present in the response headers while the setting CORS_ALLOW_CREDENTIALS is set to {cors_allow_credentials}, it should not be ...\nheaders: {response.headers}",
            )
            exit(1)
        elif cors_max_age != response.headers.get("Access-Control-Max-Age"):
            print(
                f"❌ The Access-Control-Max-Age header is set to {response.headers.get('Access-Control-Max-Age', 'missing')}, it should be {cors_max_age} ...\nheaders: {response.headers}",
                flush=True,
            )
            exit(1)
        elif cors_max_age != "86400":
            print(
                f"✅ The Access-Control-Max-Age header is set to {cors_max_age} ...",
                flush=True,
            )
        elif (
            cors_allow_credentials
            and "Access-Control-Allow-Credentials" not in response.headers
        ):
            print(
                f"❌ The Access-Control-Allow-Credentials header is not present in the response headers while the setting CORS_ALLOW_CREDENTIALS is set to {cors_allow_credentials}, it should be ...\nheaders: {response.headers}",
            )
            exit(1)
        elif cors_allow_methods != response.headers.get("Access-Control-Allow-Methods"):
            print(
                f"❌ The Access-Control-Allow-Methods header is set to {response.headers.get('Access-Control-Allow-Methods', 'missing')}, it should be {cors_allow_methods} ...\nheaders: {response.headers}",
            )
            exit(1)
        elif cors_allow_methods != "GET, POST, OPTIONS":
            print(
                f"✅ The Access-Control-Allow-Methods is set to {cors_allow_methods} ...",
                flush=True,
            )
        elif cors_allow_headers != response.headers.get("Access-Control-Allow-Headers"):
            print(
                f"❌ The Access-Control-Allow-Headers header is set to {response.headers.get('Access-Control-Allow-Headers', 'missing')}, it should be {cors_allow_headers} ...\nheaders: {response.headers}",
                flush=True,
            )
            exit(1)
        elif (
            cors_allow_headers
            != "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
        ):
            print(
                f"✅ The Access-Control-Allow-Headers header is set to {cors_allow_headers} ...",
                flush=True,
            )
        else:
            print(
                f"✅ The Access-Control-Allow-Credentials header is present and set to {cors_allow_credentials} ...",
                flush=True,
            )

        exit(0)

    # Branch 3 (all settings at defaults): exercise an actual cross-origin
    # XHR from a browser; only the CORS on/off toggle is tested here.
    print("ℹ️ Starting Firefox ...", flush=True)
    with webdriver.Firefox(options=firefox_options) as driver:
        driver.delete_all_cookies()
        driver.maximize_window()

        print(
            "ℹ️ Sending a javascript request to https://www.example.com ...",
            flush=True,
        )
        error = False

        try:
            # Synchronous XHR; a CORS rejection surfaces as a NetworkError,
            # which is expected (and therefore not flagged) below.
            driver.execute_script(
                """var xhttp = new XMLHttpRequest();
                xhttp.open("GET", "https://www.example.com", false);
                xhttp.setRequestHeader("Host", "www.example.com");
                xhttp.send();"""
            )
        except JavascriptException as e:
            if not f"{e}".startswith("Message: NetworkError"):
                print(f"❌ {e}", flush=True)
                error = True

        if use_cors == "no" and not error:
            print("❌ CORS is enabled, it shouldn't be, exiting ...", flush=True)
            exit(1)
        elif use_cors == "yes" and error:
            print("❌ CORS are not working as expected, exiting ...", flush=True)
            exit(1)

        print("✅ CORS are working as expected ...", flush=True)
except SystemExit as e:
    # Re-raise the intended exit code from the handlers above.
    exit(e.code)
except:
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1,2 @@
|
|||
requests==2.30.0
|
||||
selenium==4.9.1
|
|
@ -0,0 +1,135 @@
|
|||
#!/bin/bash

# Integration-test runner for the BunkerWeb "cors" core plugin.
# Builds the test stack, then (below) cycles through each CORS setting variation.

echo "🛰️ Building cors stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "🛰️ Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "🛰️ Build failed ❌"
    exit 1
fi

# manual=1 while cleanup_stack is invoked explicitly between tests
# (suppresses the sed restore); end=1 once all tests have finished.
manual=0
end=0
||||
# Tear down the compose stack and, on final exit (trap EXIT) or failure,
# restore the docker-compose files to their default CORS settings.
# Relies on the globals: manual (explicit mid-run call) and end (all tests done).
cleanup_stack () {
    exit_code=$?
    # Restore defaults only on real exit paths, never on the manual
    # between-tests invocation (the loop re-applies settings itself).
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CORS: "yes"@USE_CORS: "no"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "http://www.example.com"@CORS_ALLOW_ORIGIN: "\*"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "X-Test"@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "3600"@CORS_MAX_AGE: "86400"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "yes"@CORS_ALLOW_CREDENTIALS: "no"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_HEADERS: "X-Test"@CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"@' {} \;
        # Successful final exit: files restored, stack already cleaned by the loop.
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "🛰️ Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🛰️ Down failed ❌"
        exit 1
    fi

    echo "🛰️ Cleaning up current stack done ✅"
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "activated" "allow_origin" "expose_headers" "max_age" "allow_credentials" "allow_methods" "allow_headers"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🛰️ Running tests without cors ..."
|
||||
elif [ "$test" = "activated" ] ; then
|
||||
echo "🛰️ Running tests with cors ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CORS: "no"@USE_CORS: "yes"@' {} \;
|
||||
elif [ "$test" = "allow_origin" ] ; then
|
||||
echo "🛰️ Running tests with cors allow origin set to http://www.example.com ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "\*"@CORS_ALLOW_ORIGIN: "http://www.example.com"@' {} \;
|
||||
elif [ "$test" = "expose_headers" ] ; then
|
||||
echo "🛰️ Running tests with cors expose headers set to X-Test ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "http://www.example.com"@CORS_ALLOW_ORIGIN: "\*"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@CORS_EXPOSE_HEADERS: "X-Test"@' {} \;
|
||||
elif [ "$test" = "max_age" ] ; then
|
||||
echo "🛰️ Running tests with cors max age set to 3600 ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "X-Test"@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "86400"@CORS_MAX_AGE: "3600"@' {} \;
|
||||
elif [ "$test" = "allow_credentials" ] ; then
|
||||
echo "🛰️ Running tests with cors allow credentials is set to yes ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "3600"@CORS_MAX_AGE: "86400"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "no"@CORS_ALLOW_CREDENTIALS: "yes"@' {} \;
|
||||
elif [ "$test" = "allow_methods" ] ; then
|
||||
echo "🛰️ Running tests with cors allow methods is set to GET, HEAD, POST, OPTIONS ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "yes"@CORS_ALLOW_CREDENTIALS: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@' {} \;
|
||||
elif [ "$test" = "allow_headers" ] ; then
|
||||
echo "🛰️ Running tests with cors allow headers is set to X-Test ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"@CORS_ALLOW_HEADERS: "X-Test"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🛰️ Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🛰️ Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("cors-bw-1" "cors-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🛰️ Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🛰️ Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🛰️ Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🛰️ Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
# Test-runner image for the country plugin tests: minimal Python + requests.
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

# Install python deps then drop the requirements file from the layer
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,34 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests-fr:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
COUNTRY: "FR"
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:2.0.0.2"
|
||||
networks:
|
||||
bw-fr-network:
|
||||
ipv4_address: 2.0.0.3
|
||||
|
||||
tests-us:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
COUNTRY: "US"
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:8.0.0.2"
|
||||
networks:
|
||||
bw-us-network:
|
||||
ipv4_address: 8.0.0.3
|
||||
|
||||
networks:
|
||||
bw-fr-network:
|
||||
external: true
|
||||
bw-us-network:
|
||||
external: true
|
|
@ -0,0 +1,70 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? COUNTRY settings
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-us-network:
|
||||
ipv4_address: 8.0.0.2
|
||||
bw-fr-network:
|
||||
ipv4_address: 2.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-us-network:
|
||||
name: bw-us-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 8.0.0.0/8
|
||||
bw-fr-network:
|
||||
name: bw-fr-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 2.0.0.0/8
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,75 @@
|
|||
"""Integration test for the BunkerWeb country plugin.

The test container is attached to a network whose subnet geolocates to
COUNTRY; it requests the protected service and asserts that it is rejected
(403) or accepted consistently with the BLACKLIST_COUNTRY / WHITELIST_COUNTRY
settings under test. Exits 0 on success, 1 on any mismatch.
"""
from contextlib import suppress
from os import getenv
from requests import get
from requests.exceptions import RequestException
from time import sleep
from traceback import format_exc

try:
    # Wait (up to ~10 retries of 5s) until the service answers; a 403 counts
    # as "ready" because the country plugin may legitimately reject us.
    ready = False
    retries = 0
    while not ready:
        with suppress(RequestException):
            status_code = get(
                "http://www.example.com", headers={"Host": "www.example.com"}
            ).status_code

            if status_code >= 500:
                print("❌ An error occurred with the server, exiting ...", flush=True)
                exit(1)

            ready = status_code < 400 or status_code == 403

        if retries > 10:
            print("❌ The service took too long to be ready, exiting ...", flush=True)
            exit(1)
        elif not ready:
            retries += 1
            print(
                "⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
            )
            sleep(5)

    # COUNTRY is the country this client is simulated to come from; the two
    # others mirror the BunkerWeb settings applied by tests.sh for this run.
    country = getenv("COUNTRY")
    blacklist_country = getenv("BLACKLIST_COUNTRY", "")
    whitelist_country = getenv("WHITELIST_COUNTRY", "")

    print(
        "ℹ️ Sending a request to http://www.example.com ...",
        flush=True,
    )

    # Plain string (the original used an f-string with no placeholders).
    status_code = get(
        "http://www.example.com",
        headers={"Host": "www.example.com"},
    ).status_code

    if status_code == 403:
        # Rejected: only legitimate when a country rule is active and our
        # country is not the whitelisted one.
        if not blacklist_country and not whitelist_country:
            print(
                "❌ Got rejected even though there are no country blacklisted or whitelisted, exiting ...",
                flush=True,
            )
            exit(1)
        elif country == whitelist_country:
            print(
                f"❌ Got rejected even if the current country ({country}) is whitelisted, exiting ...",
                flush=True,
            )
            exit(1)

        print("✅ Got rejected, as expected ...", flush=True)
    else:
        # Accepted: must not happen when our country is blacklisted.
        if country == blacklist_country:
            print(
                f"❌ Didn't get rejected even if the current country ({country}) is blacklisted, exiting ...",
                flush=True,
            )
            exit(1)

        print("✅ Didn't get rejected, as expected ...", flush=True)
except SystemExit as e:
    # Propagate the intended exit code instead of hard-coding 1 (matches the
    # sibling cors test script; all raises above use code 1, so behavior is
    # unchanged).
    exit(e.code)
except:
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/bash

# Integration-test runner for the BunkerWeb "country" core plugin.
# Builds the test stack, then (below) runs the FR and US client containers
# against each blacklist/whitelist variation.

echo "🌍 Building country stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "🌍 Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "🌍 Build failed ❌"
    exit 1
fi

# manual=1 while cleanup_stack is invoked explicitly between tests
# (suppresses the sed restore); end=1 once all tests have finished.
manual=0
end=0
||||
# Tear down the compose stack and, on final exit (trap EXIT) or failure,
# restore the docker-compose files to their default country settings.
# Relies on the globals: manual (explicit mid-run call) and end (all tests done).
cleanup_stack () {
    exit_code=$?
    # Restore defaults only on real exit paths, never on the manual
    # between-tests invocation (the loop re-applies settings itself).
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: "US"@BLACKLIST_COUNTRY: ""@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@WHITELIST_COUNTRY: "FR"@WHITELIST_COUNTRY: ""@' {} \;
        # Successful final exit: files restored, stack already cleaned by the loop.
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "🌍 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🌍 Down failed ❌"
        exit 1
    fi

    echo "🌍 Cleaning up current stack done ✅"
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "blacklist" "whitelist"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🌍 Running tests without the country plugin ..."
|
||||
elif [ "$test" = "blacklist" ] ; then
|
||||
echo "🌍 Running tests when blacklisting United States ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: ""@BLACKLIST_COUNTRY: "US"@' {} \;
|
||||
elif [ "$test" = "whitelist" ] ; then
|
||||
echo "🌍 Running tests when whitelisting France ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: "US"@BLACKLIST_COUNTRY: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@WHITELIST_COUNTRY: ""@WHITELIST_COUNTRY: "FR"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🌍 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🌍 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("country-bw-1" "country-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🌍 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🌍 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
echo "🌍 Starting the FR container"
|
||||
docker compose -f docker-compose.test.yml up tests-fr --abort-on-container-exit --exit-code-from tests-fr 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Test \"$test\" failed for the FR container ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🌍 Test \"$test\" succeeded for the FR container ✅"
|
||||
fi
|
||||
|
||||
echo "🌍 Starting the US container"
|
||||
docker compose -f docker-compose.test.yml up tests-us --abort-on-container-exit --exit-code-from tests-us 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Test \"$test\" failed for the US container ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🌍 Test \"$test\" succeeded for the US container ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🌍 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
# Tests image for the customcert suite: Python base with the test
# dependencies baked in, running main.py as the container entrypoint.
FROM python:3.11.3-alpine

# Install dependencies from a throwaway directory to keep the image small.
WORKDIR /tmp

COPY requirements.txt .

# -j $(nproc): parallelize native builds of any compiled dependencies.
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,9 @@
|
|||
# One-shot init stack: generates the self-signed certificate used by the
# customcert tests into ./init/certs (mounted by the scheduler stack).
version: "3.5"

services:
  init:
    build: init
    environment:
      PYTHONUNBUFFERED: "1"  # immediate log output
    volumes:
      - ./init/certs:/certs  # certificate output directory
|
|
@ -0,0 +1,17 @@
|
|||
version: "3.5"

# Test runner for the customcert suite. The tests container joins the
# bw-services network with a fixed IP (covered by the BunkerWeb API
# whitelist) and resolves www.example.com to the BunkerWeb instance.
services:
  tests:
    build: .
    environment:
      PYTHONUNBUFFERED: "1"
      USE_CUSTOM_SSL: "no"  # toggled to "yes" by tests.sh via sed
    extra_hosts:
      - "www.example.com:192.168.0.2"  # fixed IP of the bw container
    networks:
      bw-services:
        ipv4_address: 192.168.0.3

networks:
  bw-services:
    external: true  # created by the main docker-compose stack
|
|
@ -0,0 +1,69 @@
|
|||
version: "3.5"

# BunkerWeb stack under test for the customcert suite: the bw instance,
# its scheduler (which provisions the custom certificate from ./init/certs),
# and a docker-socket proxy used by the scheduler for autodiscovery.
services:
  bw:
    image: bunkerity/bunkerweb:1.5.0-beta
    pull_policy: never  # use the locally built image, never pull
    ports:
      - 80:80
      - 443:443
    labels:
      - "bunkerweb.INSTANCE"  # marks the container for scheduler autodiscovery
    volumes:
      - ./index.html:/var/www/html/index.html
    environment:
      API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"  # includes the tests container IP
      HTTP_PORT: "80"
      HTTPS_PORT: "443"
      USE_BUNKERNET: "no"
      USE_BLACKLIST: "no"
      LOG_LEVEL: "info"

      # ? CUSTOM_CERT settings
      USE_CUSTOM_SSL: "no"  # toggled to "yes" by tests.sh via sed
      CUSTOM_SSL_CERT: "/certs/certificate.pem"
      CUSTOM_SSL_KEY: "/certs/privatekey.key"
    networks:
      bw-universe:
      bw-services:
        ipv4_address: 192.168.0.2  # fixed IP referenced by the tests' extra_hosts

  bw-scheduler:
    image: bunkerity/bunkerweb-scheduler:1.5.0-beta
    pull_policy: never
    depends_on:
      - bw
      - bw-docker
    volumes:
      - ./init/certs:/certs  # self-signed certificate generated by the init stack
    environment:
      DOCKER_HOST: "tcp://bw-docker:2375"  # reach docker through the socket proxy
      LOG_LEVEL: "info"
    networks:
      - bw-universe
      - bw-docker

  bw-docker:
    image: tecnativa/docker-socket-proxy
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      CONTAINERS: "1"  # only expose the containers API endpoint
    networks:
      - bw-docker

networks:
  bw-universe:
    name: bw-universe
    ipam:
      driver: default
      config:
        - subnet: 10.20.30.0/24
  bw-services:
    name: bw-services
    ipam:
      driver: default
      config:
        - subnet: 192.168.0.0/24
  bw-docker:
    name: bw-docker
|
|
@ -0,0 +1,11 @@
|
|||
# Init image for the customcert suite: generates a self-signed certificate
# via entrypoint.sh (requires bash and openssl).
FROM alpine

RUN apk add --no-cache bash openssl

WORKDIR /opt/init

COPY entrypoint.sh .

RUN chmod +x entrypoint.sh

ENTRYPOINT [ "./entrypoint.sh" ]
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/bash
# Generate a self-signed certificate for www.example.com into /certs and
# make it readable by the BunkerWeb containers (gid 101).

# Fail fast: without this, a failed openssl invocation would still let the
# init container exit 0 and the missing-file check in tests.sh would be the
# only signal of the failure.
set -e

echo "ℹ️ Generating certificate for www.example.com ..."
openssl req -nodes -x509 -newkey rsa:4096 -keyout /certs/privatekey.key -out /certs/certificate.pem -days 365 -subj /CN=www.example.com/

# gid 101 matches the nginx group inside the BunkerWeb images.
chown -R root:101 /certs
chmod -R 777 /certs
|
|
@ -0,0 +1,49 @@
|
|||
from os import getenv
from requests import get
from requests.exceptions import RequestException
from traceback import format_exc

try:
    # "yes" when tests.sh started the stack with the custom certificate enabled.
    use_custom_ssl = getenv("USE_CUSTOM_SSL", "no") == "yes"

    print(
        "ℹ️ Sending a request to http://www.example.com ...",
        flush=True,
    )

    try:
        # timeout added so an unresponsive stack fails the test instead of
        # hanging the container forever.
        get(
            "http://www.example.com",
            headers={"Host": "www.example.com"},
            timeout=10,
        )
    except RequestException:
        # An HTTP failure is only fatal when the custom cert is disabled.
        if not use_custom_ssl:
            print(
                "❌ The request failed even though the Custom Cert isn't activated, exiting ...",
                flush=True,
            )
            exit(1)

    if not use_custom_ssl:
        print("✅ The Custom Cert isn't activated, as expected ...", flush=True)
        exit(0)

    print(
        "ℹ️ Sending a request to https://www.example.com ...",
        flush=True,
    )

    try:
        # verify=False: the certificate is self-signed (see init/entrypoint.sh),
        # so TLS verification is deliberately skipped; the test only checks
        # that HTTPS is actually served.
        get(
            "https://www.example.com",
            headers={"Host": "www.example.com"},
            verify=False,
            timeout=10,
        )
    except RequestException:
        print(
            "❌ The request failed even though the Custom Cert is activated, exiting ...",
            flush=True,
        )
        exit(1)

    print("✅ The Custom Cert is activated, as expected ...", flush=True)
except SystemExit as e:
    # Propagate deliberate exits with their original status code.
    exit(e.code)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt still interrupts.
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,122 @@
|
|||
#!/bin/bash
# End-to-end driver for the customcert suite: generates a self-signed
# certificate, then runs the test container twice — once with the custom
# cert deactivated and once activated (toggled via sed on the compose files).

echo "🔏 Building customcert stack ..."

# Starting stack
docker compose pull bw-docker
if [ $? -ne 0 ] ; then
    echo "🔏 Pull failed ❌"
    exit 1
fi
docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "🔏 Build failed ❌"
    exit 1
fi

manual=0
end=0
cleanup_stack () {
    exit_code=$?
    # Restore the compose files and remove the generated certs only on a
    # real exit (success at the end, or any failure) — not on the mid-run
    # manual call between the two test iterations.
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        rm -rf init/certs
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CUSTOM_SSL: "yes"@USE_CUSTOM_SSL: "no"@' {} \;
        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "🔏 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🔏 Down failed ❌"
        exit 1
    fi

    echo "🔏 Cleaning up current stack done ✅"
}

# Cleanup stack on exit
trap cleanup_stack EXIT

echo "🔏 Initializing workspace ..."
rm -rf init/certs
mkdir -p init/certs
docker compose -f docker-compose.init.yml up --build
if [ $? -ne 0 ] ; then
    # Fixed message: this step runs the init container, not the image build.
    echo "🔏 Init failed ❌"
    exit 1
elif ! [[ -f "init/certs/certificate.pem" ]]; then
    echo "🔏 certificate.pem not found ❌"
    exit 1
elif ! [[ -f "init/certs/privatekey.key" ]]; then
    echo "🔏 privatekey.key not found ❌"
    exit 1
fi

for test in "deactivated" "activated"
do
    if [ "$test" = "deactivated" ] ; then
        echo "🔏 Running tests without the custom cert ..."
    elif [ "$test" = "activated" ] ; then
        echo "🔏 Running tests with the custom cert activated ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CUSTOM_SSL: "no"@USE_CUSTOM_SSL: "yes"@' {} \;
    fi

    echo "🔏 Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "🔏 Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy (poll docker healthchecks, 120s budget)
    echo "🔏 Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("customcert-bw-1" "customcert-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "🔏 Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "🔏 Docker stack is not healthy ❌"
        exit 1
    fi

    # Start tests
    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "🔏 Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    else
        echo "🔏 Test \"$test\" succeeded ✅"
    fi

    # Tear the stack down between iterations without triggering the
    # end-of-run restore logic in cleanup_stack.
    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "🔏 Tests are done ! ✅"
|
|
@ -0,0 +1,23 @@
|
|||
# Tests image for the core-plugins suite. Runs as uid/gid 101 (nginx) so
# file ownership matches what the BunkerWeb scheduler writes into the
# volumes shared with this container.
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

# -j $(nproc): parallelize native builds of compiled dependencies.
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

# Recreate the nginx user/group (uid/gid 101) used by the BunkerWeb images.
RUN addgroup -g 101 nginx && \
    adduser -h /opt/tests -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx

COPY --chown=nginx:nginx main.py .
# External plugins staged by the init stack (see docker-compose.init.yml).
ADD ./init/plugins external

RUN chown -R nginx:nginx external && \
    chmod -R 777 external

USER nginx:nginx

ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,9 @@
|
|||
# One-shot init stack: clones the external BunkerWeb plugins repository and
# stages the ClamAV plugin into ./init/plugins (see init/entrypoint.sh).
version: "3.5"

services:
  init:
    build: init
    environment:
      PYTHONUNBUFFERED: "1"  # immediate log output
    volumes:
      - ./init/plugins:/plugins  # plugin output directory
|
|
@ -0,0 +1,42 @@
|
|||
version: "3.5"

# Test runner for the core-plugins suite: mounts the scheduler's data, db
# model and core-plugins volumes so main.py can inspect the database and
# plugin files directly.
services:
  tests:
    build: .
    volumes:
      - bw-data:/data/lib                # scheduler data (sqlite db lives here)
      - bw-db:/opt/tests/db              # db model package imported by main.py
      - bw-core-plugins:/opt/tests/core  # core plugin definitions
    environment:
      PYTHONUNBUFFERED: "1"
      # main.py remaps this default sqlite URI to the /data/lib mount.
      DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"
      # GLOBAL_* values mirror the settings given to the scheduler stack and
      # are compared against the database content by main.py.
      GLOBAL_API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
      GLOBAL_MULTISITE: "no"
      GLOBAL_HTTP_PORT: "80"
      GLOBAL_USE_BUNKERNET: "no"
      GLOBAL_USE_BLACKLIST: "no"
      GLOBAL_USE_REVERSE_PROXY: "yes"
      GLOBAL_REVERSE_PROXY_HOST: "http://app1:8080"
      GLOBAL_REVERSE_PROXY_URL: "/"
      GLOBAL_LOG_LEVEL: "info"
      CUSTOM_CONF_MODSEC_test_custom_conf: 'SecRule REQUEST_FILENAME "@rx ^/db" "id:1,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog"'
    extra_hosts:
      - "bwadm.example.com:192.168.0.2"  # fixed IP of the bw container
    networks:
      bw-docker:
      bw-services:
        ipv4_address: 192.168.0.3

# All volumes/networks are created by the main docker-compose stack.
volumes:
  bw-data:
    external: true
  bw-db:
    external: true
  bw-core-plugins:
    external: true

networks:
  bw-services:
    external: true
  bw-docker:
    external: true
|
|
@ -0,0 +1,112 @@
|
|||
version: "3.5"

# Full BunkerWeb stack for the core-plugins suite: the bw instance, its
# scheduler (sharing data/db/core-plugin volumes with the tests container),
# a docker-socket proxy, a demo backend (app1), and one database container
# per supported backend (mariadb, mysql, postgres) for DATABASE_URI variants.
services:
  bw:
    image: bunkerity/bunkerweb:1.5.0-beta
    pull_policy: never  # use the locally built image, never pull
    labels:
      - "bunkerweb.INSTANCE"  # marks the container for scheduler autodiscovery
    environment:
      SERVER_NAME: "bwadm.example.com"
      API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
      MULTISITE: "no"
      HTTP_PORT: "80"
      USE_BUNKERNET: "no"
      USE_BLACKLIST: "no"
      USE_REVERSE_PROXY: "yes"
      REVERSE_PROXY_HOST: "http://app1:8080"
      REVERSE_PROXY_URL: "/"
      LOG_LEVEL: "info"
      CUSTOM_CONF_MODSEC_test_custom_conf: 'SecRule REQUEST_FILENAME "@rx ^/db" "id:1,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog"'
    networks:
      bw-universe:
      bw-services:
        ipv4_address: 192.168.0.2  # fixed IP referenced by the tests' extra_hosts

  bw-scheduler:
    image: bunkerity/bunkerweb-scheduler:1.5.0-beta
    pull_policy: never
    depends_on:
      - bw
      - bw-docker
    volumes:
      - bw-data:/data/lib                          # persisted data (sqlite db), shared with tests
      - bw-db:/usr/share/bunkerweb/db              # db model package, shared with tests
      - bw-core-plugins:/usr/share/bunkerweb/core  # core plugins, shared with tests
      - ./init/plugins:/data/plugins               # external plugins staged by the init stack
    environment:
      DOCKER_HOST: "tcp://bw-docker:2375"  # reach docker through the socket proxy
      LOG_LEVEL: "info"
      # ? DATABASE settings
      DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"
    networks:
      - bw-universe
      - bw-docker

  bw-docker:
    image: tecnativa/docker-socket-proxy
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      CONTAINERS: "1"  # only expose the containers API endpoint
    networks:
      - bw-docker

  # Demo backend served behind the reverse proxy.
  app1:
    image: nginxdemos/nginx-hello
    networks:
      bw-services:
        ipv4_address: 192.168.0.4

  # Alternative database backends, selected by overriding DATABASE_URI.
  bw-maria-db:
    image: mariadb:10.10
    environment:
      - MYSQL_RANDOM_ROOT_PASSWORD=yes
      - MYSQL_DATABASE=db
      - MYSQL_USER=bunkerweb
      - MYSQL_PASSWORD=secret
    networks:
      - bw-docker

  bw-mysql-db:
    image: mysql:8.0
    environment:
      - MYSQL_RANDOM_ROOT_PASSWORD=yes
      - MYSQL_DATABASE=db
      - MYSQL_USER=bunkerweb
      - MYSQL_PASSWORD=secret
    networks:
      - bw-docker

  bw-postgres-db:
    image: postgres:15.1
    environment:
      - POSTGRES_USER=bunkerweb
      - POSTGRES_PASSWORD=secret
      - POSTGRES_DB=db
    networks:
      - bw-docker

volumes:
  bw-data:
    name: bw-data
  bw-db:
    name: bw-db
  bw-core-plugins:
    name: bw-core-plugins

networks:
  bw-universe:
    name: bw-universe
    ipam:
      driver: default
      config:
        - subnet: 10.20.30.0/24
  bw-services:
    name: bw-services
    ipam:
      driver: default
      config:
        - subnet: 192.168.0.0/24
  bw-docker:
    name: bw-docker
|
|
@ -0,0 +1,11 @@
|
|||
# Init image for the core-plugins suite: clones the external BunkerWeb
# plugins repository via entrypoint.sh (requires bash and git).
FROM alpine

RUN apk add --no-cache bash git

WORKDIR /opt/init

COPY entrypoint.sh .

RUN chmod +x entrypoint.sh

ENTRYPOINT [ "./entrypoint.sh" ]
|
|
@ -0,0 +1,17 @@
|
|||
#!/bin/bash
# Fetch the external BunkerWeb plugins and stage the ClamAV plugin into
# /plugins so the scheduler can load it.

# Fail fast: without this, a failed clone or checkout would let the script
# fall through (copying from the wrong directory) while still exiting 0.
set -e

echo "ℹ️ Cloning BunkerWeb Plugins ..."

git clone https://github.com/bunkerity/bunkerweb-plugins.git

echo "ℹ️ Checking out to dev branch ..."

cd bunkerweb-plugins
git checkout dev # TODO: remove this when the next release of bw-plugins is out

echo "ℹ️ Extracting ClamAV plugin ..."

cp -r clamav /plugins/

# gid 101 matches the nginx group inside the BunkerWeb images.
chown -R root:101 /plugins
chmod -R 777 /plugins
|
|
@ -0,0 +1,984 @@
|
|||
from contextlib import contextmanager
|
||||
from glob import iglob
|
||||
from hashlib import sha512
|
||||
from json import dumps, load
|
||||
from os import environ, getenv
|
||||
from os.path import dirname, join
|
||||
from pathlib import Path
|
||||
from re import compile as re_compile
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.exc import (
|
||||
ArgumentError,
|
||||
DatabaseError,
|
||||
OperationalError,
|
||||
SQLAlchemyError,
|
||||
)
|
||||
from sqlalchemy.orm import scoped_session, sessionmaker
|
||||
from traceback import format_exc
|
||||
from time import sleep
|
||||
|
||||
from db.model import (
|
||||
Custom_configs,
|
||||
Global_values,
|
||||
Jobs,
|
||||
Metadata,
|
||||
Plugins,
|
||||
Plugin_pages,
|
||||
Services,
|
||||
Services_settings,
|
||||
Settings,
|
||||
)
|
||||
|
||||
try:
|
||||
database_uri = getenv("DATABASE_URI", "sqlite:////var/lib/bunkerweb/db.sqlite3")
|
||||
|
||||
if database_uri == "sqlite:////var/lib/bunkerweb/db.sqlite3":
|
||||
database_uri = "sqlite:////data/lib/db.sqlite3"
|
||||
|
||||
error = False
|
||||
|
||||
print(f"ℹ️ Connecting to database: {database_uri}", flush=True)
|
||||
|
||||
try:
|
||||
sql_engine = create_engine(
|
||||
database_uri,
|
||||
future=True,
|
||||
)
|
||||
except ArgumentError:
|
||||
print(f"❌ Invalid database URI: {database_uri}", flush=True)
|
||||
error = True
|
||||
except SQLAlchemyError:
|
||||
print(f"❌ Error when trying to create the engine: {format_exc()}", flush=True)
|
||||
error = True
|
||||
finally:
|
||||
if error:
|
||||
exit(1)
|
||||
|
||||
try:
|
||||
assert sql_engine is not None
|
||||
except AssertionError:
|
||||
print("❌ The database engine is not initialized", flush=True)
|
||||
exit(1)
|
||||
|
||||
not_connected = True
|
||||
retries = 15
|
||||
|
||||
while not_connected:
|
||||
try:
|
||||
with sql_engine.connect() as conn:
|
||||
conn.execute(text("CREATE TABLE IF NOT EXISTS test (id INT)"))
|
||||
conn.execute(text("DROP TABLE test"))
|
||||
not_connected = False
|
||||
except (OperationalError, DatabaseError) as e:
|
||||
if retries <= 0:
|
||||
print(f"❌ Can't connect to database : {format_exc()}", flush=True)
|
||||
exit(1)
|
||||
|
||||
if "attempt to write a readonly database" in str(e):
|
||||
print(
|
||||
"⚠️ The database is read-only, waiting for it to become writable. Retrying in 5 seconds ...",
|
||||
flush=True,
|
||||
)
|
||||
sql_engine.dispose(close=True)
|
||||
sql_engine = create_engine(
|
||||
database_uri,
|
||||
future=True,
|
||||
)
|
||||
if "Unknown table" in str(e):
|
||||
not_connected = False
|
||||
continue
|
||||
else:
|
||||
print(
|
||||
"⚠️ Can't connect to database, retrying in 5 seconds ...",
|
||||
flush=True,
|
||||
)
|
||||
retries -= 1
|
||||
sleep(5)
|
||||
except BaseException:
|
||||
print(
|
||||
f"❌ Error when trying to connect to the database: {format_exc()}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("ℹ️ Database connection established, launching tests ...", flush=True)
|
||||
|
||||
session = sessionmaker()
|
||||
sql_session = scoped_session(session)
|
||||
sql_session.remove()
|
||||
sql_session.configure(bind=sql_engine, autoflush=False, expire_on_commit=False)
|
||||
|
||||
@contextmanager
|
||||
def db_session():
|
||||
try:
|
||||
assert sql_session is not None
|
||||
except AssertionError:
|
||||
print("❌ The database session is not initialized", flush=True)
|
||||
exit(1)
|
||||
|
||||
session = sql_session()
|
||||
session.expire_on_commit = False
|
||||
|
||||
try:
|
||||
yield session
|
||||
except BaseException:
|
||||
session.rollback()
|
||||
raise
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
print("ℹ️ Checking if database is initialized ...", flush=True)
|
||||
|
||||
with db_session() as session:
|
||||
metadata = (
|
||||
session.query(Metadata)
|
||||
.with_entities(Metadata.is_initialized)
|
||||
.filter_by(id=1)
|
||||
.first()
|
||||
)
|
||||
|
||||
if metadata is None or not metadata.is_initialized:
|
||||
print(
|
||||
"❌ The database is not initialized, it should be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Database is initialized", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if service bwadm.example.com is in the database ...", flush=True)
|
||||
|
||||
with db_session() as session:
|
||||
services = session.query(Services).all()
|
||||
|
||||
if not services:
|
||||
print(
|
||||
"❌ The bw_services database table is empty, it shouldn't be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if services[0].id != "bwadm.example.com":
|
||||
print(
|
||||
"❌ The service bwadm.example.com is not in the database, it should be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Service bwadm.example.com is in the database", flush=True)
|
||||
print(" ", flush=True)
|
||||
print(
|
||||
"ℹ️ Checking if global values are in the database and are correct ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
global_settings = {}
|
||||
service_settings = {}
|
||||
multisite = getenv("GLOBAL_MULTISITE", "no") == "yes"
|
||||
for env in environ:
|
||||
if env.startswith("GLOBAL_"):
|
||||
if env == "GLOBAL_MULTISITE" and environ[env] == "no":
|
||||
continue
|
||||
global_settings[env[7:]] = {"value": environ[env], "checked": False}
|
||||
elif env.startswith("SERVICE_"):
|
||||
service_settings[env[8:]] = {"value": environ[env], "checked": False}
|
||||
|
||||
with db_session() as session:
|
||||
global_values = session.query(Global_values).all()
|
||||
|
||||
for global_value in global_values:
|
||||
if global_value.setting_id in global_settings:
|
||||
if (
|
||||
global_value.value
|
||||
!= global_settings[global_value.setting_id]["value"]
|
||||
):
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but is not correct, exiting ...\n{global_value.value} (database) != {global_settings[global_value.setting_id]['value']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif global_value.suffix != 0:
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but has the wrong suffix, exiting ...\n{global_value.suffix} (database) != 0 (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif global_value.method != "scheduler":
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but has the wrong method, exiting ...\n{global_value.method} (database) != scheduler (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
global_settings[global_value.setting_id]["checked"] = True
|
||||
else:
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not all(
|
||||
[global_settings[global_value]["checked"] for global_value in global_settings]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all global values are in the database, exiting ...\nmissing values: {', '.join([global_value for global_value in global_settings if not global_settings[global_value]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Global values are in the database and are correct", flush=True)
|
||||
print(" ", flush=True)
|
||||
print(
|
||||
"ℹ️ Checking if service values are in the database and are correct ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
with db_session() as session:
|
||||
services_settings = session.query(Services_settings).all()
|
||||
|
||||
if not multisite and service_settings:
|
||||
print(
|
||||
'❌ The bw_services_settings database table is not empty, it should be when multisite is set to "no", exiting ...',
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
else:
|
||||
for service_setting in services_settings:
|
||||
if service_setting.setting_id in service_settings:
|
||||
if (
|
||||
service_setting.value
|
||||
!= service_settings[service_setting.setting_id]["value"]
|
||||
):
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but is not correct, exiting ...\n{service_setting.value} (database) != {service_settings[service_setting.setting_id]['value']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif service_setting.suffix != 0:
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but has the wrong suffix, exiting ...\n{service_setting.suffix} (database) != 0 (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif service_setting.method != "scheduler":
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but has the wrong method, exiting ...\n{service_setting.method} (database) != scheduler (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
service_settings[service_setting.setting_id]["checked"] = True
|
||||
else:
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not all(
|
||||
[
|
||||
service_settings[service_setting]["checked"]
|
||||
for service_setting in service_settings
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all service values are in the database, exiting ...\nmissing values: {', '.join([service_setting for service_setting in service_settings if not service_settings[service_setting]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Service values are correct", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if the plugins are correct ...", flush=True)
|
||||
|
||||
core_plugins = {
|
||||
"general": {
|
||||
"order": 999,
|
||||
"name": "General",
|
||||
"description": "The general settings for the server",
|
||||
"version": "0.1",
|
||||
"stream": "partial",
|
||||
"external": False,
|
||||
"checked": False,
|
||||
"page_checked": True,
|
||||
"settings": {
|
||||
"IS_LOADING": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Internal use : set to yes when BW is loading.",
|
||||
"id": "internal-use",
|
||||
"label": "internal use",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"NGINX_PREFIX": {
|
||||
"context": "global",
|
||||
"default": "/etc/nginx/",
|
||||
"help": "Where nginx will search for configurations.",
|
||||
"id": "nginx-prefix",
|
||||
"label": "nginx prefix",
|
||||
"regex": "^(/[\\w. -]+)*/$",
|
||||
"type": "text",
|
||||
},
|
||||
"HTTP_PORT": {
|
||||
"context": "global",
|
||||
"default": "8080",
|
||||
"help": "HTTP port number which bunkerweb binds to.",
|
||||
"id": "http-port",
|
||||
"label": "HTTP port",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"HTTPS_PORT": {
|
||||
"context": "global",
|
||||
"default": "8443",
|
||||
"help": "HTTPS port number which bunkerweb binds to.",
|
||||
"id": "https-port",
|
||||
"label": "HTTPS port",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"MULTISITE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Multi site activation.",
|
||||
"id": "multisite",
|
||||
"label": "Multisite",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SERVER_NAME": {
|
||||
"context": "multisite",
|
||||
"default": "www.example.com",
|
||||
"help": "List of the virtual hosts served by bunkerweb.",
|
||||
"id": "server-name",
|
||||
"label": "Server name",
|
||||
"regex": "^(?! )( ?((?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?)(?!.* \\2))*$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_PROCESSES": {
|
||||
"context": "global",
|
||||
"default": "auto",
|
||||
"help": "Number of worker processes.",
|
||||
"id": "worker-processes",
|
||||
"label": "Worker processes",
|
||||
"regex": "^(auto|\\d+)$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_RLIMIT_NOFILE": {
|
||||
"context": "global",
|
||||
"default": "2048",
|
||||
"help": "Maximum number of open files for worker processes.",
|
||||
"id": "worker-rlimit-nofile",
|
||||
"label": "Open files per worker",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_CONNECTIONS": {
|
||||
"context": "global",
|
||||
"default": "1024",
|
||||
"help": "Maximum number of connections per worker.",
|
||||
"id": "worker-connections",
|
||||
"label": "Connections per worker",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"LOG_FORMAT": {
|
||||
"context": "global",
|
||||
"default": '$host $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"',
|
||||
"help": "The format to use for access logs.",
|
||||
"id": "log-format",
|
||||
"label": "Log format",
|
||||
"regex": "^.*$",
|
||||
"type": "text",
|
||||
},
|
||||
"LOG_LEVEL": {
|
||||
"context": "global",
|
||||
"default": "notice",
|
||||
"help": "The level to use for error logs.",
|
||||
"id": "log-level",
|
||||
"label": "Log level",
|
||||
"regex": "^(debug|info|notice|warn|error|crit|alert|emerg)$",
|
||||
"type": "select",
|
||||
"select": [
|
||||
"debug",
|
||||
"info",
|
||||
"notice",
|
||||
"warn",
|
||||
"error",
|
||||
"crit",
|
||||
"alert",
|
||||
"emerg",
|
||||
],
|
||||
},
|
||||
"DNS_RESOLVERS": {
|
||||
"context": "global",
|
||||
"default": "127.0.0.11",
|
||||
"help": "DNS addresses of resolvers to use.",
|
||||
"id": "dns-resolvers",
|
||||
"label": "DNS resolvers",
|
||||
"regex": "^(?! )( *(((\\b25[0-5]|\\b2[0-4]\\d|\\b[01]?\\d\\d?)(\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)){3})(\\/([1-2][0-9]?|3[0-2]?|[04-9]))?|(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]Z{0,4}){0,4}%[0-9a-zA-Z]+|::(ffff(:0{1,4})?:)?((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d))(\\/(12[0-8]|1[01][0-9]|[0-9][0-9]?))?)(?!.*\\D\\2([^\\d\\/]|$)) *)*$",
|
||||
"type": "text",
|
||||
},
|
||||
"DATASTORE_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "64m",
|
||||
"help": "Size of the internal datastore.",
|
||||
"id": "datastore-memory-size",
|
||||
"label": "Datastore memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "64m",
|
||||
"help": "Size of the internal cachestore.",
|
||||
"id": "cachestore-memory-size",
|
||||
"label": "Cachestore memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_IPC_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (ipc).",
|
||||
"id": "cachestore-ipc-memory-size",
|
||||
"label": "Cachestore ipc memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_MISS_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (miss).",
|
||||
"id": "cachestore-miss-memory-size",
|
||||
"label": "Cachestore miss memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_LOCKS_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (locks).",
|
||||
"id": "cachestore-locks-memory-size",
|
||||
"label": "Cachestore locks memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"USE_API": {
|
||||
"context": "global",
|
||||
"default": "yes",
|
||||
"help": "Activate the API to control BunkerWeb.",
|
||||
"id": "use-api",
|
||||
"label": "Activate API",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"API_HTTP_PORT": {
|
||||
"context": "global",
|
||||
"default": "5000",
|
||||
"help": "Listen port number for the API.",
|
||||
"id": "api-http-listen",
|
||||
"label": "API port number",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_LISTEN_IP": {
|
||||
"context": "global",
|
||||
"default": "0.0.0.0",
|
||||
"help": "Listen IP address for the API.",
|
||||
"id": "api-ip-listen",
|
||||
"label": "API listen IP",
|
||||
"regex": "^.*$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_SERVER_NAME": {
|
||||
"context": "global",
|
||||
"default": "bwapi",
|
||||
"help": "Server name (virtual host) for the API.",
|
||||
"id": "api-server-name",
|
||||
"label": "API server name",
|
||||
"regex": "^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_WHITELIST_IP": {
|
||||
"context": "global",
|
||||
"default": "127.0.0.0/8",
|
||||
"help": "List of IP/network allowed to contact the API.",
|
||||
"id": "api-whitelist-ip",
|
||||
"label": "API whitelist IP",
|
||||
"regex": "^(?! )( *(((\\b25[0-5]|\\b2[0-4]\\d|\\b[01]?\\d\\d?)(\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)){3})(\\/([1-2][0-9]?|3[0-2]?|[04-9]))?|(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]Z{0,4}){0,4}%[0-9a-zA-Z]+|::(ffff(:0{1,4})?:)?((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d))(\\/(12[0-8]|1[01][0-9]|[0-9][0-9]?))?)(?!.*\\D\\2([^\\d\\/]|$)) *)*$",
|
||||
"type": "text",
|
||||
},
|
||||
"AUTOCONF_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Autoconf Docker integration.",
|
||||
"id": "autoconf-mode",
|
||||
"label": "Autoconf mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SWARM_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Docker Swarm integration.",
|
||||
"id": "swarm-mode",
|
||||
"label": "Swarm mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"KUBERNETES_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Kubernetes integration.",
|
||||
"id": "kubernetes-mode",
|
||||
"label": "Kubernetes mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SERVER_TYPE": {
|
||||
"context": "multisite",
|
||||
"default": "http",
|
||||
"help": "Server type : http or stream.",
|
||||
"id": "server-type",
|
||||
"label": "Server type",
|
||||
"regex": "^(http|stream)$",
|
||||
"type": "select",
|
||||
"select": ["http", "stream"],
|
||||
},
|
||||
"LISTEN_STREAM": {
|
||||
"context": "multisite",
|
||||
"default": "yes",
|
||||
"help": "Enable listening for non-ssl (passthrough).",
|
||||
"id": "listen-stream",
|
||||
"label": "Listen stream",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"LISTEN_STREAM_PORT": {
|
||||
"context": "multisite",
|
||||
"default": "1337",
|
||||
"help": "Listening port for non-ssl (passthrough).",
|
||||
"id": "listen-stream-port",
|
||||
"label": "Listen stream port",
|
||||
"regex": "^[0-9]+$",
|
||||
"type": "text",
|
||||
},
|
||||
"LISTEN_STREAM_PORT_SSL": {
|
||||
"context": "multisite",
|
||||
"default": "4242",
|
||||
"help": "Listening port for ssl (passthrough).",
|
||||
"id": "listen-stream-port-ssl",
|
||||
"label": "Listen stream port ssl",
|
||||
"regex": "^[0-9]+$",
|
||||
"type": "text",
|
||||
},
|
||||
"USE_UDP": {
|
||||
"context": "multisite",
|
||||
"default": "no",
|
||||
"help": "UDP listen instead of TCP (stream).",
|
||||
"id": "use-udp",
|
||||
"label": "Listen UDP",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
for filename in iglob(join("core", "*", "plugin.json")):
|
||||
with open(filename, "r") as f:
|
||||
data = load(f)
|
||||
data["checked"] = False
|
||||
for x, job in enumerate(data.get("jobs", [])):
|
||||
data["jobs"][x]["checked"] = False
|
||||
data["page_checked"] = not Path(f"{dirname(filename)}/ui").exists() or False
|
||||
core_plugins[data.pop("id")] = data
|
||||
|
||||
external_plugins = {}
|
||||
for filename in iglob(join("external", "*", "plugin.json")):
|
||||
with open(filename, "r") as f:
|
||||
data = load(f)
|
||||
data["checked"] = False
|
||||
for x, job in enumerate(data.get("jobs", [])):
|
||||
data["jobs"][x]["checked"] = False
|
||||
data["page_checked"] = not Path(f"{dirname(filename)}/ui").exists() or False
|
||||
external_plugins[data.pop("id")] = data
|
||||
|
||||
with db_session() as session:
|
||||
plugins = (
|
||||
session.query(Plugins)
|
||||
.with_entities(
|
||||
Plugins.id,
|
||||
Plugins.order,
|
||||
Plugins.name,
|
||||
Plugins.description,
|
||||
Plugins.version,
|
||||
Plugins.stream,
|
||||
Plugins.external,
|
||||
Plugins.method,
|
||||
)
|
||||
.all()
|
||||
)
|
||||
|
||||
for plugin in plugins:
|
||||
if not plugin.external and plugin.id in core_plugins:
|
||||
current_plugin = core_plugins
|
||||
elif plugin.external and plugin.id in external_plugins:
|
||||
current_plugin = external_plugins
|
||||
else:
|
||||
print(
|
||||
f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if (
|
||||
plugin.order != current_plugin[plugin.id]["order"]
|
||||
or plugin.name != current_plugin[plugin.id]["name"]
|
||||
or plugin.description != current_plugin[plugin.id]["description"]
|
||||
or plugin.version != current_plugin[plugin.id]["version"]
|
||||
or plugin.stream != current_plugin[plugin.id]["stream"]
|
||||
):
|
||||
print(
|
||||
f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but is not correct, exiting ...\n{dumps({'order': plugin.order, 'name': plugin.name, 'description': plugin.description, 'version': plugin.version, 'stream': plugin.stream})} (database) != {dumps({'order': current_plugin[plugin.id]['order'], 'name': current_plugin[plugin.id]['name'], 'description': current_plugin[plugin.id]['description'], 'version': current_plugin[plugin.id]['version'], 'stream': current_plugin[plugin.id]['stream']})} (file)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
else:
|
||||
settings = session.query(Settings).filter_by(plugin_id=plugin.id).all()
|
||||
|
||||
for setting in settings:
|
||||
if (
|
||||
setting.name
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["id"]
|
||||
or setting.context
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["context"]
|
||||
or setting.default
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["default"]
|
||||
or setting.help
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["help"]
|
||||
or setting.label
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["label"]
|
||||
or setting.regex
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["regex"]
|
||||
or setting.type
|
||||
!= current_plugin[plugin.id]["settings"][setting.id]["type"]
|
||||
or setting.multiple
|
||||
!= current_plugin[plugin.id]["settings"][setting.id].get(
|
||||
"multiple", None
|
||||
)
|
||||
):
|
||||
print(
|
||||
f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but is not correct, exiting ...\n{dumps({'default': setting.default, 'help': setting.help, 'label': setting.label, 'regex': setting.regex, 'type': setting.type})} (database) != {dumps({'default': current_plugin[plugin.id]['settings'][setting.id]['default'], 'help': current_plugin[plugin.id]['settings'][setting.id]['help'], 'label': current_plugin[plugin.id]['settings'][setting.id]['label'], 'regex': current_plugin[plugin.id]['settings'][setting.id]['regex'], 'type': current_plugin[plugin.id]['settings'][setting.id]['type']})} (file)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
current_plugin[plugin.id]["checked"] = True
|
||||
|
||||
if not all([core_plugins[plugin]["checked"] for plugin in core_plugins]):
|
||||
print(
|
||||
f"❌ Not all core plugins are in the database, exiting ...\nmissing plugins: {', '.join([plugin for plugin in core_plugins if not core_plugins[plugin]])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif not all([external_plugins[plugin]["checked"] for plugin in external_plugins]):
|
||||
print(
|
||||
f"❌ Not all external plugins are in the database, exiting ...\nmissing plugins: {', '.join([plugin for plugin in external_plugins if not external_plugins[plugin]])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ The ClamAV plugin and all core plugins are in the database", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if the jobs are in the database ...", flush=True)
|
||||
|
||||
with db_session() as session:
|
||||
jobs = session.query(Jobs).all()
|
||||
|
||||
for job in jobs:
|
||||
if not job.success:
|
||||
print(
|
||||
f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but failed, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if job.plugin_id in core_plugins:
|
||||
current_plugin = core_plugins
|
||||
elif job.plugin_id in external_plugins:
|
||||
current_plugin = external_plugins
|
||||
else:
|
||||
print(
|
||||
f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
index = next(
|
||||
index
|
||||
for (index, d) in enumerate(
|
||||
current_plugin[job.plugin_id].get("jobs", [])
|
||||
)
|
||||
if d["name"] == job.name
|
||||
)
|
||||
core_job = current_plugin[job.plugin_id]["jobs"][index]
|
||||
|
||||
if (
|
||||
job.name != core_job["name"]
|
||||
or job.file_name != core_job["file"]
|
||||
or job.every != core_job["every"]
|
||||
or job.reload != core_job["reload"]
|
||||
):
|
||||
print(
|
||||
f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but is not correct, exiting ...\n{dumps({'name': job.name, 'file': job.file_name, 'every': job.every, 'reload': job.reload})} (database) != {dumps({'name': core_job['name'], 'file': core_job['file'], 'every': core_job['every'], 'reload': core_job['reload']})} (file)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
current_plugin[job.plugin_id]["jobs"][index]["checked"] = True
|
||||
|
||||
if not all(
|
||||
[
|
||||
all([job["checked"] for job in core_plugins[plugin].get("jobs", [])])
|
||||
for plugin in core_plugins
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all jobs from core plugins are in the database, exiting ...\nmissing jobs: {dumps({plugin: [job['name'] for job in core_plugins[plugin]['jobs'] if not job['checked']] for plugin in core_plugins})}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif not all(
|
||||
[
|
||||
all([job["checked"] for job in external_plugins[plugin].get("jobs", [])])
|
||||
for plugin in external_plugins
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all jobs from external plugins are in the database, exiting ...\nmissing jobs: {dumps({plugin: [job['name'] for job in external_plugins[plugin]['jobs'] if not job['checked']] for plugin in external_plugins})}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ All jobs are in the database and have successfully ran", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if all plugin pages are in the database ...", flush=True)
|
||||
|
||||
def file_hash(file: str) -> str:
|
||||
_sha512 = sha512()
|
||||
with open(file, "rb") as f:
|
||||
while True:
|
||||
data = f.read(1024)
|
||||
if not data:
|
||||
break
|
||||
_sha512.update(data)
|
||||
return _sha512.hexdigest()
|
||||
|
||||
with db_session() as session:
|
||||
plugin_pages = (
|
||||
session.query(Plugin_pages)
|
||||
.with_entities(
|
||||
Plugin_pages.id,
|
||||
Plugin_pages.plugin_id,
|
||||
Plugin_pages.template_checksum,
|
||||
Plugin_pages.actions_checksum,
|
||||
)
|
||||
.all()
|
||||
)
|
||||
|
||||
for plugin_page in plugin_pages:
|
||||
if plugin_page.plugin_id in core_plugins:
|
||||
current_plugin = core_plugins
|
||||
elif plugin_page.plugin_id in external_plugins:
|
||||
current_plugin = external_plugins
|
||||
else:
|
||||
print(
|
||||
f"❌ The plugin page from {plugin_page.plugin_id} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
path_ui = (
|
||||
Path(join("core", plugin_page.plugin_id, "ui"))
|
||||
if Path(join("core", plugin_page.plugin_id, "ui")).exists()
|
||||
else Path(join("external", plugin_page.plugin_id, "ui"))
|
||||
)
|
||||
|
||||
if not path_ui.exists():
|
||||
print(
|
||||
f'❌ The plugin page from {plugin_page.plugin_id} is in the database but should not be because the "ui" folder is missing from the plugin, exiting ...',
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
template_checksum = file_hash(f"{path_ui}/template.html")
|
||||
actions_checksum = file_hash(f"{path_ui}/actions.py")
|
||||
|
||||
if plugin_page.template_checksum != template_checksum:
|
||||
print(
|
||||
f"❌ The plugin page from {plugin_page.plugin_id} is in the database but the template file checksum differ, exiting ...\n{plugin_page.template_checksum} (database) != {template_checksum} (file)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif plugin_page.actions_checksum != actions_checksum:
|
||||
print(
|
||||
f"❌ The plugin page from {plugin_page.plugin_id} is in the database but the actions file checksum differ, exiting ...\n{plugin_page.actions_checksum} (database) != {actions_checksum} (file)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
current_plugin[plugin_page.plugin_id]["page_checked"] = True
|
||||
|
||||
if not all([core_plugins[plugin]["page_checked"] for plugin in core_plugins]):
|
||||
print(
|
||||
f"❌ Not all core plugins pages are in the database, exiting ...\nmissing plugins pages: {', '.join([plugin for plugin in core_plugins if not core_plugins[plugin]['page_checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif not all(
|
||||
[external_plugins[plugin]["page_checked"] for plugin in external_plugins]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all external plugins pages are in the database, exiting ...\nmissing plugins pages: {', '.join([plugin for plugin in external_plugins if not external_plugins[plugin]['page_checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ All plugin pages are in the database and have the right value", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if all custom configs are in the database ...", flush=True)
|
||||
|
||||
custom_confs_rx = re_compile(
|
||||
r"^([0-9a-z\.-]*)_?CUSTOM_CONF_(SERVICE_)?(HTTP|SERVER_STREAM|STREAM|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
|
||||
)
|
||||
|
||||
global_custom_configs = {}
|
||||
service_custom_configs = {}
|
||||
for env in environ:
|
||||
if not custom_confs_rx.match(env):
|
||||
continue
|
||||
|
||||
custom_conf = custom_confs_rx.search(env).groups()
|
||||
if custom_conf[1]:
|
||||
service_custom_configs[custom_conf[3]] = {
|
||||
"value": environ[env].encode(),
|
||||
"type": custom_conf[2].lower(),
|
||||
"method": "scheduler",
|
||||
"checked": False,
|
||||
}
|
||||
continue
|
||||
|
||||
global_custom_configs[custom_conf[3]] = {
|
||||
"value": environ[env].encode(),
|
||||
"type": custom_conf[2].lower(),
|
||||
"method": "scheduler",
|
||||
"checked": False,
|
||||
}
|
||||
|
||||
with db_session() as session:
|
||||
custom_configs = (
|
||||
session.query(Custom_configs)
|
||||
.with_entities(
|
||||
Custom_configs.service_id,
|
||||
Custom_configs.type,
|
||||
Custom_configs.name,
|
||||
Custom_configs.data,
|
||||
Custom_configs.method,
|
||||
)
|
||||
.all()
|
||||
)
|
||||
|
||||
for custom_config in custom_configs:
|
||||
if (
|
||||
not multisite
|
||||
and custom_config.name in global_custom_configs
|
||||
and custom_config.service_id
|
||||
):
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but should not be owned by the service {custom_config.service_id} because multisite is not enabled, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif (
|
||||
multisite
|
||||
and custom_config.name in service_custom_configs
|
||||
and not custom_config.service_id
|
||||
):
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but should be owned by the service bwadm.example.com because it's a service config, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if custom_config.name in global_custom_configs:
|
||||
current_custom_configs = global_custom_configs
|
||||
elif custom_config.name in service_custom_configs:
|
||||
current_custom_configs = service_custom_configs
|
||||
else:
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if custom_config.type != current_custom_configs[custom_config.name]["type"]:
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but the type differ, exiting ...\n{custom_config.type} (database) != {current_custom_configs[custom_config.name]['type']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif (
|
||||
custom_config.data
|
||||
!= current_custom_configs[custom_config.name]["value"]
|
||||
):
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but the value differ, exiting ...\n{custom_config.data} (database) != {current_custom_configs[custom_config.name]['value']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif (
|
||||
custom_config.method
|
||||
!= current_custom_configs[custom_config.name]["method"]
|
||||
):
|
||||
print(
|
||||
f"❌ The custom config {custom_config.name} is in the database but the method differ, exiting ...\n{custom_config.method} (database) != {current_custom_configs[custom_config.name]['method']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
current_custom_configs[custom_config.name]["checked"] = True
|
||||
|
||||
if not all(
|
||||
[
|
||||
global_custom_configs[custom_config]["checked"]
|
||||
for custom_config in global_custom_configs
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all global custom configs are in the database, exiting ...\nmissing custom configs: {', '.join([custom_config for custom_config in global_custom_configs if not global_custom_configs[custom_config]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif not all(
|
||||
[
|
||||
service_custom_configs[custom_config]["checked"]
|
||||
for custom_config in service_custom_configs
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all service custom configs are in the database, exiting ...\nmissing custom configs: {', '.join([custom_config for custom_config in service_custom_configs if not service_custom_configs[custom_config]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print(
|
||||
"✅ All custom configs are in the database and have the right value", flush=True
|
||||
)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1,4 @@
|
|||
sqlalchemy==2.0.13
|
||||
psycopg2-binary==2.9.6
|
||||
PyMySQL==1.0.3
|
||||
cryptography==40.0.2
|
|
@ -0,0 +1,168 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "💾 Building db stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1 bw-maria-db bw-mysql-db bw-postgres-db
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
rm -rf init/plugins
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@MULTISITE: "yes"$@MULTISITE: "no"@' {} \;
|
||||
sed -i 's@bwadm.example.com_USE_REVERSE_PROXY@USE_REVERSE_PROXY@' docker-compose.yml
|
||||
sed -i 's@bwadm.example.com_REVERSE_PROXY_HOST@REVERSE_PROXY_HOST@' docker-compose.yml
|
||||
sed -i 's@bwadm.example.com_REVERSE_PROXY_URL@REVERSE_PROXY_URL@' docker-compose.yml
|
||||
sed -i 's@SERVICE_USE_REVERSE_PROXY@GLOBAL_USE_REVERSE_PROXY@' docker-compose.test.yml
|
||||
sed -i 's@SERVICE_REVERSE_PROXY_HOST@GLOBAL_REVERSE_PROXY_HOST@' docker-compose.test.yml
|
||||
sed -i 's@SERVICE_REVERSE_PROXY_URL@GLOBAL_REVERSE_PROXY_URL@' docker-compose.test.yml
|
||||
|
||||
if [[ $(sed '20!d' docker-compose.yml) = ' bwadm.example.com_SERVER_NAME: "bwadm.example.com"' ]] ; then
|
||||
sed -i '20d' docker-compose.yml
|
||||
fi
|
||||
|
||||
if [[ $(sed '24!d' docker-compose.yml) = " bwadm.example.com_CUSTOM_CONF_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" ]] ; then
|
||||
sed -i '24d' docker-compose.yml
|
||||
fi
|
||||
|
||||
if [[ $(sed '18!d' docker-compose.test.yml) = ' SERVICE_SERVER_NAME: "bwadm.example.com"' ]] ; then
|
||||
sed -i '18d' docker-compose.test.yml
|
||||
fi
|
||||
|
||||
if [[ $(sed '23!d' docker-compose.test.yml) = " CUSTOM_CONF_SERVICE_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" ]] ; then
|
||||
sed -i '23d' docker-compose.test.yml
|
||||
fi
|
||||
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "💾 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "💾 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
echo "💾 Initializing workspace ..."
|
||||
rm -rf init/plugins
|
||||
mkdir -p init/plugins
|
||||
docker compose -f docker-compose.init.yml up --build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Build failed ❌"
|
||||
exit 1
|
||||
elif ! [[ -d "init/plugins/clamav" ]]; then
|
||||
echo "💾 ClamAV plugin not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for test in "local" "multisite" "mariadb" "mysql" "postgres"
|
||||
do
|
||||
if [ "$test" = "local" ] ; then
|
||||
echo "💾 Running tests with a local database ..."
|
||||
elif [ "$test" = "multisite" ] ; then
|
||||
echo "💾 Running tests with MULTISITE set to yes and with multisite settings ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@MULTISITE: "no"$@MULTISITE: "yes"@' {} \;
|
||||
sed -i '20i \ bwadm.example.com_SERVER_NAME: "bwadm.example.com"' docker-compose.yml
|
||||
sed -i "25i \ bwadm.example.com_CUSTOM_CONF_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" docker-compose.yml
|
||||
sed -i 's@USE_REVERSE_PROXY@bwadm.example.com_USE_REVERSE_PROXY@' docker-compose.yml
|
||||
sed -i 's@REVERSE_PROXY_HOST@bwadm.example.com_REVERSE_PROXY_HOST@' docker-compose.yml
|
||||
sed -i 's@REVERSE_PROXY_URL@bwadm.example.com_REVERSE_PROXY_URL@' docker-compose.yml
|
||||
sed -i '18i \ SERVICE_SERVER_NAME: "bwadm.example.com"' docker-compose.test.yml
|
||||
sed -i "24i \ CUSTOM_CONF_SERVICE_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" docker-compose.test.yml
|
||||
sed -i 's@GLOBAL_USE_REVERSE_PROXY@SERVICE_USE_REVERSE_PROXY@' docker-compose.test.yml
|
||||
sed -i 's@GLOBAL_REVERSE_PROXY_HOST@SERVICE_REVERSE_PROXY_HOST@' docker-compose.test.yml
|
||||
sed -i 's@GLOBAL_REVERSE_PROXY_URL@SERVICE_REVERSE_PROXY_URL@' docker-compose.test.yml
|
||||
elif [ "$test" = "mariadb" ] ; then
|
||||
echo "💾 Running tests with MariaDB database ..."
|
||||
echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "mariadb+pymysql://bunkerweb:secret\@bw-maria-db:3306/db"@' {} \;
|
||||
elif [ "$test" = "mysql" ] ; then
|
||||
echo "💾 Running tests with MySQL database ..."
|
||||
echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "mysql+pymysql://bunkerweb:secret\@bw-mysql-db:3306/db"@' {} \;
|
||||
elif [ "$test" = "postgres" ] ; then
|
||||
echo "💾 Running tests with PostgreSQL database ..."
|
||||
echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "postgresql://bunkerweb:secret\@bw-postgres-db:5432/db"@' {} \;
|
||||
fi
|
||||
|
||||
echo "💾 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "💾 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("db-bw-1" "db-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "💾 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "💾 Docker stack is not healthy ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "💾 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "💾 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "💾 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,9 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
init:
|
||||
build: init
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
volumes:
|
||||
- ./init/output:/output
|
|
@ -0,0 +1,18 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_DNSBL: "yes"
|
||||
DNSBL_LIST: "bl.blocklist.de problems.dnsbl.sorbs.net"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,65 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? DNSBL settings
|
||||
USE_DNSBL: "yes"
|
||||
DNSBL_LIST: "bl.blocklist.de problems.dnsbl.sorbs.net"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,25 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
# Install firefox and geckodriver
|
||||
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
|
||||
apk add --no-cache firefox
|
||||
|
||||
# Installing geckodriver for firefox...
|
||||
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
|
||||
wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
|
||||
chmod +x /usr/local/bin/geckodriver && \
|
||||
rm geckodriver.tar.gz
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,59 @@
|
|||
from contextlib import suppress
|
||||
from ipaddress import IPv4Address
|
||||
from pathlib import Path
|
||||
from traceback import format_exc
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
from selenium.webdriver.remote.webelement import WebElement
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from socket import gaierror, gethostbyname
|
||||
from typing import List
|
||||
|
||||
try:
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
dnsbl_servers = []
|
||||
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
driver_wait = WebDriverWait(driver, 10)
|
||||
|
||||
print("ℹ️ Navigating to https://www.dnsbl.info/dnsbl-list.php ...", flush=True)
|
||||
driver.get("https://www.dnsbl.info/dnsbl-list.php")
|
||||
|
||||
print("ℹ️ Getting the DNSBL servers ...")
|
||||
links: List[WebElement] = driver_wait.until(
|
||||
EC.presence_of_all_elements_located(
|
||||
(By.XPATH, "//table[@class='body_sub_body']//td")
|
||||
)
|
||||
)
|
||||
|
||||
for link in links:
|
||||
content = link.text
|
||||
if content:
|
||||
dnsbl_servers.append(content)
|
||||
|
||||
print("ℹ️ Checking the DNSBL servers for a banned IP ...", flush=True)
|
||||
|
||||
for ip_address in [IPv4Address(f"{x}.0.0.3") for x in range(1, 256)]:
|
||||
for dnsbl_server in dnsbl_servers:
|
||||
with suppress(gaierror):
|
||||
gethostbyname(
|
||||
f"{ip_address.reverse_pointer.replace('.in-addr.arpa', '')}.{dnsbl_server}"
|
||||
)
|
||||
print(
|
||||
f"✅ {ip_address} is banned on {dnsbl_server}, saving it to /output/dnsbl_ip.txt",
|
||||
flush=True,
|
||||
)
|
||||
Path("/output/dnsbl_ip.txt").write_text(f"{ip_address} {dnsbl_server}")
|
||||
exit(0)
|
||||
except SystemExit as e:
|
||||
exit(e.code)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
selenium==4.9.1
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue