Merge branch 'dev' of github.com:bunkerity/bunkerweb into dev

commit e5e336c4f3
@@ -1,5 +1,5 @@
 mkdocs==1.4.3
-mkdocs-material==9.1.11
+mkdocs-material==9.1.12
 pytablewriter==0.64.2
 mike==1.1.2
 jinja2<3.1.0

@@ -1,4 +1,4 @@
-sqlalchemy==2.0.12
+sqlalchemy==2.0.13
 psycopg2-binary==2.9.6
 PyMySQL==1.0.3
 cryptography==40.0.2

@ -225,48 +225,48 @@ pymysql==1.0.3 \
|
|||
--hash=sha256:3dda943ef3694068a75d69d071755dbecacee1adf9a1fc5b206830d2b67d25e8 \
|
||||
--hash=sha256:89fc6ae41c0aeb6e1f7710cdd623702ea2c54d040565767a78b00a5ebb12f4e5
|
||||
# via -r requirements.in
|
||||
sqlalchemy==2.0.12 \
|
||||
--hash=sha256:03206576ca53f55b9de6e890273e498f4b2e6e687a9db9859bdcd21df5a63e53 \
|
||||
--hash=sha256:09205893a84b6bedae0453d3f384f5d2a6499b6e45ad977549894cdcd85d8f1c \
|
||||
--hash=sha256:0e5501c78b5ab917f0f0f75ce7f0018f683a0a76e95f30e6561bf61c9ff69d43 \
|
||||
--hash=sha256:10f1ff0ebe21d2cea89ead231ba3ecf75678463ab85f19ce2ce91207620737f3 \
|
||||
--hash=sha256:1fac17c866111283cbcdb7024d646abb71fdd95f3ce975cf3710258bc55742fd \
|
||||
--hash=sha256:297b752d4f30350b64175bbbd57dc94c061a35f5d1dba088d0a367dbbebabc94 \
|
||||
--hash=sha256:2a3101252f3de9a18561c1fb0a68b1ee465485990aba458d4510f214bd5a582c \
|
||||
--hash=sha256:32762dba51b663609757f861584a722093487f53737e76474cc6e190904dc31b \
|
||||
--hash=sha256:369f6564e68a9c60f0b9dde121def491e651a4ba8dcdd652a93f1cd5977cd85c \
|
||||
--hash=sha256:3745dee26a7ee012598577ad3b8f6e6cd50a49b2afa0cde9db668da6bf2c2319 \
|
||||
--hash=sha256:3c053c3f4c4e45d4c8b27977647566c140d6de3f61a4e2acb92ea24cf9911c7f \
|
||||
--hash=sha256:4ad525b9dd17b478a2ed8580d7f2bc46b0f5889153c6b1c099729583e395b4b9 \
|
||||
--hash=sha256:53b2c8adbcbb59732fb21a024aaa261983655845d86e3fc26a5676cec0ebaa09 \
|
||||
--hash=sha256:5d709f43caee115b03b707b8cbbcb8b303045dd7cdc825b6d29857d71f3425ae \
|
||||
--hash=sha256:5e9d390727c11b9a7e583bf6770de36895c0936bddb98ae93ae99282e6428d5f \
|
||||
--hash=sha256:6b1fa0ffc378a7061c452cb4a1f804fad1b3b8aa8d0552725531d27941b2e3ed \
|
||||
--hash=sha256:6e1d50592cb24d1947c374c666add65ded7c181ec98a89ed17abbe9b8b2e2ff4 \
|
||||
--hash=sha256:77a06b0983faf9aa48ee6219d41ade39dee16ce90857cc181dbcf6918acd234d \
|
||||
--hash=sha256:7eb25b981cbc9e7df9f56ad7ec4c6d77323090ca4b7147fcdc09d66535377759 \
|
||||
--hash=sha256:85b0efe1c71459ba435a6593f54a0e39334b16ba383e8010fdb9d0127ca51ba8 \
|
||||
--hash=sha256:87b2c2d13c3d1384859b60eabb3139e169ce68ada1d2963dbd0c7af797f16efe \
|
||||
--hash=sha256:8aad66215a3817a7a1d535769773333250de2653c89b53f7e2d42b677d398027 \
|
||||
--hash=sha256:91f4b1bdc987ef85fe3a0ce5d26ac72ff8f60207b08272aa2a65494836391d69 \
|
||||
--hash=sha256:978bee4ecbcdadf087220618409fb9be9509458df479528b70308f0599c7c519 \
|
||||
--hash=sha256:9fe98e9d26778d7711ceee2c671741b4f54c74677668481d733d6f70747d7690 \
|
||||
--hash=sha256:a022c588c0f413f8cddf9fcc597dbf317efeac4186d8bff9aa7f3219258348b0 \
|
||||
--hash=sha256:a4709457f1c317e347051498b91fa2b86c4bcdebf93c84e6d121a4fc8a397307 \
|
||||
--hash=sha256:aec5fb36b53125554ecc2285526eb5cc31b21f6cb059993c1c5ca831959de052 \
|
||||
--hash=sha256:b6ceca432ce88ad12aab5b5896c343a1993c90b325d9193dcd055e73e18a0439 \
|
||||
--hash=sha256:b76c2fde827522e21922418325c1b95c2d795cdecfb4bc261e4d37965199ee7f \
|
||||
--hash=sha256:bddfc5bd1dee5db0fddc9dab26f800c283f3243e7281bbf107200fed30125f9c \
|
||||
--hash=sha256:bf83700faa9642388fbd3167db3f6cbb2e88cc8367b8c22204f3f408ee782d25 \
|
||||
--hash=sha256:c5268ec05c21e2ecf5bca09314bcaadfec01f02163088cd602db4379862958dd \
|
||||
--hash=sha256:d9796d5c13b2b7f05084d0ce52528cf919f9bde9e0f10672a6393a4490415695 \
|
||||
--hash=sha256:dc67efd00ce7f428a446ce012673c03c63c5abb5dec3f33750087b8bdc173bf0 \
|
||||
--hash=sha256:dfd6385b662aea83e63dd4db5fe116eb11914022deb1745f0b57fa8470c18ffe \
|
||||
--hash=sha256:e495ad05a13171fbb5d72fe5993469c8bceac42bcf6b8f9f117a518ee7fbc353 \
|
||||
--hash=sha256:e752c34f7a2057ebe82c856698b9f277c633d4aad006bddf7af74598567c8931 \
|
||||
--hash=sha256:f0843132168b44ca33c5e5a2046c954775dde8c580ce27f5cf2e134d0d9919e4 \
|
||||
--hash=sha256:f30c5608c64fc9c1fa9a16277eb4784f782362566fe40ff8d283358c8f2c5fe0 \
|
||||
--hash=sha256:f6ebadefc4331dda83c22519e1ea1e61104df6eb38abbb80ab91b0a8527a5c19
|
||||
sqlalchemy==2.0.13 \
|
||||
--hash=sha256:0aa2cbde85a6eab9263ab480f19e8882d022d30ebcdc14d69e6a8d7c07b0a871 \
|
||||
--hash=sha256:0d6979c9707f8b82366ba34b38b5a6fe32f75766b2e901f9820e271e95384070 \
|
||||
--hash=sha256:0eb14a386a5b610305bec6639b35540b47f408b0a59f75999199aed5b3d40079 \
|
||||
--hash=sha256:2424a84f131901fbb20a99844d47b38b517174c6e964c8efb15ea6bb9ced8c2b \
|
||||
--hash=sha256:2ad9688debf1f0ae9c6e0706a4e2d33b1a01281317cee9bd1d7eef8020c5baac \
|
||||
--hash=sha256:2f0a355264af0952570f18457102984e1f79510f856e5e0ae652e63316d1ca23 \
|
||||
--hash=sha256:31f72bb300eed7bfdb373c7c046121d84fa0ae6f383089db9505ff553ac27cef \
|
||||
--hash=sha256:375b7ba88f261dbd79d044f20cbcd919d88befb63f26af9d084614f10cdf97a6 \
|
||||
--hash=sha256:37de4010f53f452e94e5ed6684480432cfe6a7a8914307ef819cd028b05b98d5 \
|
||||
--hash=sha256:49c138856035cb97f0053e5e57ba90ec936b28a0b8b0020d44965c7b0c0bf03a \
|
||||
--hash=sha256:4f9832815257969b3ca9bf0501351e4c02c8d60cbd3ec9f9070d5b0f8852900e \
|
||||
--hash=sha256:566a0ac347cf4632f551e7b28bbd0d215af82e6ffaa2556f565a3b6b51dc3f81 \
|
||||
--hash=sha256:6777673d346071451bf7cccf8d0499024f1bd6a835fc90b4fe7af50373d92ce6 \
|
||||
--hash=sha256:72746ec17a7d9c5acf2c57a6e6190ceba3dad7127cd85bb17f24e90acc0e8e3f \
|
||||
--hash=sha256:755f653d693f9b8f4286d987aec0d4279821bf8d179a9de8e8a5c685e77e57d6 \
|
||||
--hash=sha256:7612a7366a0855a04430363fb4ab392dc6818aaece0b2e325ff30ee77af9b21f \
|
||||
--hash=sha256:7ad24c85f2a1caf0cd1ae8c2fdb668777a51a02246d9039420f94bd7dbfd37ed \
|
||||
--hash=sha256:881cc388dded44ae6e17a1666364b98bd76bcdc71b869014ae725f06ba298e0e \
|
||||
--hash=sha256:8d97b37b4e60073c38bcf94e289e3be09ef9be870de88d163f16e08f2b9ded1a \
|
||||
--hash=sha256:9119795d2405eb23bf7e6707e228fe38124df029494c1b3576459aa3202ea432 \
|
||||
--hash=sha256:9136d596111c742d061c0f99bab95c5370016c4101a32e72c2b634ad5e0757e6 \
|
||||
--hash=sha256:9ad883ac4f5225999747f0849643c4d0ec809d9ffe0ddc81a81dd3e68d0af463 \
|
||||
--hash=sha256:a25b4c4fdd633501233924f873e6f6cd8970732859ecfe4ecfb60635881f70be \
|
||||
--hash=sha256:a30e4db983faa5145e00ef6eaf894a2d503b3221dbf40a595f3011930d3d0bac \
|
||||
--hash=sha256:a5e9e78332a5d841422b88b8c490dfd7f761e64b3430249b66c05d02f72ceab0 \
|
||||
--hash=sha256:b4e08e3831671008888bad5d160d757ef35ce34dbb73b78c3998d16aa1334c97 \
|
||||
--hash=sha256:bf1aae95e80acea02a0a622e1c12d3fefc52ffd0fe7bda70a30d070373fbb6c3 \
|
||||
--hash=sha256:c61b89803a87a3b2a394089a7dadb79a6c64c89f2e8930cc187fec43b319f8d2 \
|
||||
--hash=sha256:cdf80359b641185ae7e580afb9f88cf560298f309a38182972091165bfe1225d \
|
||||
--hash=sha256:d93ebbff3dcf05274843ad8cf650b48ee634626e752c5d73614e5ec9df45f0ce \
|
||||
--hash=sha256:db24d2738add6db19d66ca820479d2f8f96d3f5a13c223f27fa28dd2f268a4bd \
|
||||
--hash=sha256:e0d20f27edfd6f35b388da2bdcd7769e4ffa374fef8994980ced26eb287e033a \
|
||||
--hash=sha256:e2f3b5236079bc3e318a92bab2cc3f669cc32127075ab03ff61cacbae1c392b8 \
|
||||
--hash=sha256:e481e54db8cec1457ee7c05f6d2329e3298a304a70d3b5e2e82e77170850b385 \
|
||||
--hash=sha256:e5e5dc300a0ca8755ada1569f5caccfcdca28607dfb98b86a54996b288a8ebd3 \
|
||||
--hash=sha256:ec2f525273528425ed2f51861b7b88955160cb95dddb17af0914077040aff4a5 \
|
||||
--hash=sha256:f234ba3bb339ad17803009c8251f5ee65dcf283a380817fe486823b08b26383d \
|
||||
--hash=sha256:f463598f9e51ccc04f0fe08500f9a0c3251a7086765350be418598b753b5561d \
|
||||
--hash=sha256:f717944aee40e9f48776cf85b523bb376aa2d9255a268d6d643c57ab387e7264 \
|
||||
--hash=sha256:fd0febae872a4042da44e972c070f0fd49a85a0a7727ab6b85425f74348be14e \
|
||||
--hash=sha256:fec56c7d1b6a22c8f01557de3975d962ee40270b81b60d1cfdadf2a105d10e84
|
||||
# via -r requirements.in
|
||||
typing-extensions==4.5.0 \
|
||||
--hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \
|
||||
|
|
|
@@ -1,6 +1,7 @@
-docker==6.1.1
+docker==6.1.2
 kubernetes==26.1.0
 jinja2==3.1.2
 python-dotenv==1.0.0
 requests==2.30.0
 redis==4.5.5
+urllib3==2.0.2

@@ -1,13 +1,9 @@
 #
-# This file is autogenerated by pip-compile with Python 3.9
+# This file is autogenerated by pip-compile with Python 3.11
 # by the following command:
 #
 # pip-compile --allow-unsafe --generate-hashes --resolver=backtracking
 #
-async-timeout==4.0.2 \
-    --hash=sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15 \
-    --hash=sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c
-    # via redis
 cachetools==5.3.0 \
     --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \
     --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4
@@ -95,9 +91,9 @@ charset-normalizer==3.1.0 \
     --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
     --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
     # via requests
-docker==6.1.1 \
-    --hash=sha256:5ec18b9c49d48ee145a5b5824bb126dc32fc77931e18444783fc07a7724badc0 \
-    --hash=sha256:8308b23d3d0982c74f7aa0a3abd774898c0c4fba006e9c3bde4f68354e470fe2
+docker==6.1.2 \
+    --hash=sha256:134cd828f84543cbf8e594ff81ca90c38288df3c0a559794c12f2e4b634ea19e \
+    --hash=sha256:dcc088adc2ec4e7cfc594e275d8bd2c9738c56c808de97476939ef67db5af8c2
     # via -r requirements.in
 google-auth==2.17.3 \
     --hash=sha256:ce311e2bc58b130fddf316df57c9b3943c2a7b4f6ec31de9663a9333e4064efc \
@@ -266,6 +262,7 @@ urllib3==2.0.2 \
     --hash=sha256:61717a1095d7e155cdb737ac7bb2f4324a858a1e2e6466f6d03ff630ca68d3cc \
     --hash=sha256:d055c2f9d38dc53c808f6fdc8eab7360b6fdbbde02340ed25cfbcd817c62469e
     # via
+    # -r requirements.in
     # docker
     # kubernetes
     # requests

@@ -10,7 +10,12 @@ pip install pip --upgrade > /dev/null && pip install pip-compile-multi pip-upgra
 
 echo "Updating requirements.in files"
 
-files=("../../docs/requirements.txt" "../common/db/requirements.in" "../common/gen/requirements.in" "../scheduler/requirements.in" "../ui/requirements.in" "../../tests/requirements.txt" "../../tests/ui/requirements.txt")
+files=("../../docs/requirements.txt" "../common/db/requirements.in" "../common/gen/requirements.in" "../scheduler/requirements.in" "../ui/requirements.in")
+
+for file in $(find ../../tests -iname "requirements.txt")
+do
+files+=("$file")
+done
 
 for file in "${files[@]}"
 do
@@ -31,6 +36,8 @@ do
 echo "No need to generate hashes for $file"
 fi
 
+echo " "
+
 cd -
 done
 
@@ -1,3 +1,3 @@
 schedule==1.2.0
-certbot==2.5.0
-maxminddb==2.2.0
+certbot==2.6.0
+maxminddb==2.3.0

@@ -4,13 +4,13 @@
 #
 # pip-compile --allow-unsafe --generate-hashes --resolver=backtracking
 #
-acme==2.5.0 \
-    --hash=sha256:416586ed660c5ebcec1e16a134d2e522ac6d6fc4f972955354f0b1a7e61ac97c \
-    --hash=sha256:9c760b115c54f55f692321abe5d09f5c70ce437e61f9f22340341621d9909a5a
+acme==2.6.0 \
+    --hash=sha256:261e8655034f3afe099e68a9c5061304f5ba0c1625ec1b8288fc228315bacbdf \
+    --hash=sha256:607360db73e458150ab63825a82b220bdf04badbd81c6d3218e5d993c6be491c
     # via certbot
-certbot==2.5.0 \
-    --hash=sha256:76e6e5305021d3ee54c42fc471f8f0ed5dba790e6fd7fef6713060b0e42b97d7 \
-    --hash=sha256:a2d730753124508effe79f648264f5cab4d1e9120acfd695a4a0c2b7bab4a966
+certbot==2.6.0 \
+    --hash=sha256:6513b9fb3d266f1fa3803a1713eb7e415bbc250b624c834e02cb71086b26d800 \
+    --hash=sha256:c4de6bb0d092729650ed90a5bdb513932bdc47ec5f7f98049180ab8e4a835dab
     # via -r requirements.in
 certifi==2023.5.7 \
     --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \
@@ -206,8 +206,8 @@ josepy==1.13.0 \
     # via
     # acme
     # certbot
-maxminddb==2.2.0 \
-    --hash=sha256:e37707ec4fab115804670e0fb7aedb4b57075a8b6f80052bdc648d3c005184e5
+maxminddb==2.3.0 \
+    --hash=sha256:120920dddd955f32ae48c207c6cef6fd5dc8874a889ba94b0f2c1f736ecdf308
     # via -r requirements.in
 parsedatetime==2.6 \
     --hash=sha256:4cb368fbb18a0b7231f4d76119165451c8d2e35951455dfee97c62a87b04d455 \

@@ -0,0 +1,25 @@
+FROM python:3.11.3-alpine
+
+# Install firefox and geckodriver
+RUN apk add --no-cache --virtual .build-deps curl grep zip && \
+    apk add --no-cache firefox
+
+# Installing geckodriver for firefox...
+RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
+    wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
+    tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
+    chmod +x /usr/local/bin/geckodriver && \
+    rm geckodriver.tar.gz
+
+WORKDIR /tmp
+
+COPY requirements.txt .
+
+RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
+    rm -f requirements.txt
+
+WORKDIR /opt/tests
+
+COPY main.py .
+
+ENTRYPOINT [ "python3", "main.py" ]

@@ -0,0 +1,18 @@
+version: "3.5"
+
+services:
+  tests:
+    build: .
+    environment:
+      PYTHONUNBUFFERED: "1"
+      USE_ANTIBOT: "no"
+      ANTIBOT_URI: "/challenge"
+    extra_hosts:
+      - "www.example.com:192.168.0.2"
+    networks:
+      bw-services:
+        ipv4_address: 192.168.0.3
+
+networks:
+  bw-services:
+    external: true

@ -0,0 +1,68 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? ANTIBOT settings
|
||||
USE_ANTIBOT: "no"
|
||||
ANTIBOT_URI: "/challenge"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,95 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.common.exceptions import NoSuchElementException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
test_type = getenv("USE_ANTIBOT", "no")
|
||||
antibot_uri = getenv("ANTIBOT_URI", "/challenge")
|
||||
|
||||
if test_type != "javascript":
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
|
||||
print("ℹ️ Navigating to http://www.example.com ...", flush=True)
|
||||
|
||||
driver.get("http://www.example.com")
|
||||
|
||||
if driver.current_url.endswith(antibot_uri) and test_type == "no":
|
||||
print("❌ Antibot is enabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
elif test_type == "captcha":
|
||||
if not driver.current_url.endswith(antibot_uri):
|
||||
print(
|
||||
"❌ Antibot is disabled or the endpoint is wrong ...", flush=True
|
||||
)
|
||||
exit(1)
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//input[@name='captcha']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The captcha input is missing ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print(
|
||||
f"✅ The captcha input is present{' and the endpoint is correct' if antibot_uri != '/challenge' else ''} ...",
|
||||
flush=True,
|
||||
)
|
||||
else:
|
||||
print("✅ Antibot is disabled, as expected ...", flush=True)
|
||||
else:
|
||||
status_code = get(
|
||||
"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
allow_redirects=False,
|
||||
).status_code
|
||||
if status_code >= 500:
|
||||
print("ℹ️ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif status_code != 302:
|
||||
print(
|
||||
"❌ The server should have redirected to the antibot page ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Status code is 302, as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@@ -0,0 +1,2 @@
+requests==2.30.0
+selenium==4.9.1

@ -0,0 +1,110 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🤖 Building antibot stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@ANTIBOT_URI: "/custom"@ANTIBOT_URI: "/challenge"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_ANTIBOT: ".*"$@USE_ANTIBOT: "no"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🤖 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🤖 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "javascript" "captcha" "endpoint"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🤖 Running tests without antibot ..."
|
||||
elif [ "$test" = "endpoint" ] ; then
|
||||
echo "🤖 Running tests where antibot is on a different endpoint ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@ANTIBOT_URI: "/challenge"@ANTIBOT_URI: "/custom"@' {} \;
|
||||
elif [ "$test" != "deactivated" ] ; then
|
||||
echo "🤖 Running tests with antibot \"$test\" ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_ANTIBOT: ".*"$@USE_ANTIBOT: "'"${test}"'"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🤖 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🤖 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("antibot-bw-1" "antibot-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🤖 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🤖 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🤖 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🤖 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🤖 Tests are done ! ✅"
|
|
@ -0,0 +1,25 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
# Install firefox and geckodriver
|
||||
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
|
||||
apk add --no-cache firefox
|
||||
|
||||
# Installing geckodriver for firefox...
|
||||
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
|
||||
wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
|
||||
chmod +x /usr/local/bin/geckodriver && \
|
||||
rm geckodriver.tar.gz
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,20 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_AUTH_BASIC: "no"
|
||||
AUTH_BASIC_LOCATION: "sitewide"
|
||||
AUTH_BASIC_USER: "bunkerity"
|
||||
AUTH_BASIC_PASSWORD: "Secr3tP@ssw0rd"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,70 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? AUTH_BASIC settings
|
||||
USE_AUTH_BASIC: "no"
|
||||
AUTH_BASIC_LOCATION: "sitewide"
|
||||
AUTH_BASIC_USER: "bunkerity"
|
||||
AUTH_BASIC_PASSWORD: "Secr3tP@ssw0rd"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,106 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.common.exceptions import NoSuchElementException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code <= 401
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
use_auth_basic = getenv("USE_AUTH_BASIC", "no")
|
||||
auth_basic_location = getenv("AUTH_BASIC_LOCATION", "sitewide")
|
||||
auth_basic_username = getenv("AUTH_BASIC_USER", "bunkerity")
|
||||
auth_basic_password = getenv("AUTH_BASIC_PASSWORD", "Secr3tP@ssw0rd")
|
||||
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
|
||||
if use_auth_basic == "no" or auth_basic_location != "sitewide":
|
||||
print("ℹ️ Navigating to http://www.example.com ...", flush=True)
|
||||
driver.get("http://www.example.com")
|
||||
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//img[@alt='NGINX Logo']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The page is not accessible ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
if use_auth_basic == "no":
|
||||
print("✅ Auth-basic is disabled, as expected ...", flush=True)
|
||||
else:
|
||||
print(
|
||||
f"ℹ️ Trying to access http://www.example.com{auth_basic_location} ...",
|
||||
flush=True,
|
||||
)
|
||||
status_code = get(
|
||||
f"http://www.example.com{auth_basic_location}",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code != 401:
|
||||
print("❌ The page is accessible without auth-basic ...", flush=True)
|
||||
exit(1)
|
||||
print(
|
||||
"✅ Auth-basic is enabled and working in the expected location ...",
|
||||
)
|
||||
else:
|
||||
print(f"ℹ️ Trying to access http://www.example.com ...", flush=True)
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code != 401:
|
||||
print("❌ The page is accessible without auth-basic ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print(
|
||||
f"ℹ️ Trying to access http://{auth_basic_username}:{auth_basic_password}@www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
driver.get(
|
||||
f"http://{auth_basic_username}:{auth_basic_password}@www.example.com"
|
||||
)
|
||||
|
||||
try:
|
||||
driver.find_element(By.XPATH, "//img[@alt='NGINX Logo']")
|
||||
except NoSuchElementException:
|
||||
print("❌ The page is not accessible ...", flush=True)
|
||||
exit(1)
|
||||
print("✅ Auth-basic is enabled and working, as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@@ -0,0 +1,2 @@
+requests==2.30.0
+selenium==4.9.1

@ -0,0 +1,119 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🔐 Building authbasic stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_AUTH_BASIC: "yes"@USE_AUTH_BASIC: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "/auth"@AUTH_BASIC_LOCATION: "sitewide"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_USER: "admin"@AUTH_BASIC_USER: "bunkerity"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_PASSWORD: "password"@AUTH_BASIC_PASSWORD: "Secr3tP\@ssw0rd"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🔐 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🔐 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "sitewide" "location" "user" "password"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🔐 Running tests without authbasic ..."
|
||||
elif [ "$test" = "sitewide" ] ; then
|
||||
echo "🔐 Running tests with sitewide authbasic ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_AUTH_BASIC: "no"@USE_AUTH_BASIC: "yes"@' {} \;
|
||||
elif [ "$test" = "location" ] ; then
|
||||
echo "🔐 Running tests with the location changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "sitewide"@AUTH_BASIC_LOCATION: "/auth"@' {} \;
|
||||
elif [ "$test" = "user" ] ; then
|
||||
echo "🔐 Running tests with the user changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_LOCATION: "/auth"@AUTH_BASIC_LOCATION: "sitewide"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_USER: "bunkerity"@AUTH_BASIC_USER: "admin"@' {} \;
|
||||
elif [ "$test" = "password" ] ; then
|
||||
echo "🔐 Running tests with the password changed ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@AUTH_BASIC_PASSWORD: "Secr3tP\@ssw0rd"@AUTH_BASIC_PASSWORD: "password"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🔐 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🔐 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("authbasic-bw-1" "authbasic-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🔐 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🔐 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔐 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🔐 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🔐 Tests are done ! ✅"
|
|
@@ -0,0 +1,14 @@
+FROM python:3.11.3-alpine
+
+WORKDIR /tmp
+
+COPY requirements.txt .
+
+RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
+    rm -f requirements.txt
+
+WORKDIR /opt/tests
+
+COPY main.py .
+
+ENTRYPOINT [ "python3", "main.py" ]

@ -0,0 +1,25 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
USE_BAD_BEHAVIOR: "yes"
|
||||
BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"
|
||||
BAD_BEHAVIOR_BAN_TIME: "86400"
|
||||
BAD_BEHAVIOR_THRESHOLD: "10"
|
||||
BAD_BEHAVIOR_COUNT_TIME: "60"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
||||
bw-docker:
|
||||
external: true
|
|
@ -0,0 +1,65 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BAD_BEHAVIOR settings
|
||||
USE_BAD_BEHAVIOR: "yes"
|
||||
BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"
|
||||
BAD_BEHAVIOR_BAN_TIME: "86400"
|
||||
BAD_BEHAVIOR_THRESHOLD: "10"
|
||||
BAD_BEHAVIOR_COUNT_TIME: "60"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,134 @@
|
|||
from contextlib import suppress
|
||||
from datetime import datetime
|
||||
from docker import DockerClient
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_bad_behavior = getenv("USE_BAD_BEHAVIOR", "yes")
|
||||
bad_behavior_status_codes = getenv(
|
||||
"BAD_BEHAVIOR_STATUS_CODES", "400 401 403 404 405 429 444"
|
||||
)
|
||||
bad_behavior_ban_time = getenv("BAD_BEHAVIOR_BAN_TIME", "86400")
|
||||
bad_behavior_threshold = getenv("BAD_BEHAVIOR_THRESHOLD", "10")
|
||||
bad_behavior_count_time = getenv("BAD_BEHAVIOR_COUNT_TIME", "60")
|
||||
|
||||
print(
|
||||
"ℹ️ Sending 15 requests to http://www.example.com/?id=/etc/passwd ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
for _ in range(15):
|
||||
get(
|
||||
"http://www.example.com/?id=/etc/passwd",
|
||||
headers={"Host": "www.example.com"},
|
||||
)
|
||||
|
||||
sleep(1)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
if use_bad_behavior == "no":
|
||||
print("❌ Bad Behavior is enabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_status_codes != "400 401 403 404 405 429 444":
|
||||
print("❌ Bad Behavior's status codes didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_ban_time != "86400":
|
||||
print(
|
||||
"ℹ️ Sleeping for 7s to wait if Bad Behavior's ban time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
sleep(7)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
print("❌ Bad Behavior's ban time didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_threshold != "10":
|
||||
print("❌ Bad Behavior's threshold didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif bad_behavior_count_time != "60":
|
||||
print(
|
||||
"ℹ️ Sleeping for 7s to wait if Bad Behavior's count time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
current_time = datetime.now().timestamp()
|
||||
sleep(7)
|
||||
|
||||
print(
|
||||
"ℹ️ Checking BunkerWeb's logs to see if Bad Behavior's count time changed ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
|
||||
docker_client = DockerClient(base_url=docker_host)
|
||||
|
||||
bw_instances = docker_client.containers.list(
|
||||
filters={"label": "bunkerweb.INSTANCE"}
|
||||
)
|
||||
|
||||
if not bw_instances:
|
||||
print("❌ BunkerWeb instance not found ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
bw_instance = bw_instances[0]
|
||||
|
||||
found = False
|
||||
for log in bw_instance.logs(since=current_time).split(b"\n"):
|
||||
if b"decreased counter for IP 192.168.0.3 (0/10)" in log:
|
||||
found = True
|
||||
break
|
||||
|
||||
if not found:
|
||||
print("❌ Bad Behavior's count time didn't changed ...", flush=True)
|
||||
exit(1)
|
||||
elif (
|
||||
use_bad_behavior == "yes"
|
||||
and bad_behavior_status_codes == "400 401 403 404 405 429 444"
|
||||
and bad_behavior_threshold == "10"
|
||||
):
|
||||
print("❌ Bad Behavior is disabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print("✅ Bad Behavior is working as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@@ -0,0 +1,2 @@
+requests==2.30.0
+docker==6.1.2

@ -0,0 +1,126 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "📟 Building badbehavior stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📟 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📟 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "no"@USE_BAD_BEHAVIOR: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "5"@BAD_BEHAVIOR_BAN_TIME: "86400"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "20"@BAD_BEHAVIOR_THRESHOLD: "10"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_COUNT_TIME: "5"@BAD_BEHAVIOR_COUNT_TIME: "60"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "📟 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📟 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "📟 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "activated" "deactivated" "status_codes" "ban_time" "threshold" "count_time"
|
||||
do
|
||||
if [ "$test" = "activated" ] ; then
|
||||
echo "📟 Running tests with badbehavior activated ..."
|
||||
elif [ "$test" = "deactivated" ] ; then
|
||||
echo "📟 Running tests without badbehavior ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "yes"@USE_BAD_BEHAVIOR: "no"@' {} \;
|
||||
elif [ "$test" = "status_codes" ] ; then
|
||||
echo "📟 Running tests with badbehavior's 403 status code removed from the list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BAD_BEHAVIOR: "no"@USE_BAD_BEHAVIOR: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@' {} \;
|
||||
elif [ "$test" = "ban_time" ] ; then
|
||||
echo "📟 Running tests with badbehavior's ban time changed to 5 seconds ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_STATUS_CODES: "400 401 404 405 429 444"@BAD_BEHAVIOR_STATUS_CODES: "400 401 403 404 405 429 444"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "86400"@BAD_BEHAVIOR_BAN_TIME: "5"@' {} \;
|
||||
elif [ "$test" = "threshold" ] ; then
|
||||
echo "📟 Running tests with badbehavior's threshold set to 20 ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_BAN_TIME: "5"@BAD_BEHAVIOR_BAN_TIME: "86400"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "10"@BAD_BEHAVIOR_THRESHOLD: "20"@' {} \;
|
||||
elif [ "$test" = "count_time" ] ; then
|
||||
echo "📟 Running tests with badbehavior's count time set to 5 seconds ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_THRESHOLD: "20"@BAD_BEHAVIOR_THRESHOLD: "10"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BAD_BEHAVIOR_COUNT_TIME: "60"@BAD_BEHAVIOR_COUNT_TIME: "5"@' {} \;
|
||||
fi
|
||||
|
||||
echo "📟 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📟 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "📟 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("badbehavior-bw-1" "badbehavior-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "📟 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "📟 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📟 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "📟 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "📟 Tests are done ! ✅"
|
|
@@ -0,0 +1,14 @@
+FROM python:3.11.3-alpine
+
+WORKDIR /tmp
+
+COPY requirements.txt .
+
+RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
+    rm -f requirements.txt
+
+WORKDIR /opt/tests
+
+COPY main.py .
+
+ENTRYPOINT [ "python3", "main.py" ]

@@ -0,0 +1,14 @@
+FROM python:3.11.3-alpine
+
+WORKDIR /tmp
+
+COPY requirements.txt .
+
+RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
+    rm -f requirements.txt
+
+WORKDIR /opt/blacklist_api
+
+COPY main.py .
+
+ENTRYPOINT [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--proxy-headers", "--forwarded-allow-ips", "\"*\"" ]

@@ -0,0 +1,30 @@
+from fastapi import FastAPI
+from fastapi.responses import PlainTextResponse
+
+
+app = FastAPI()
+
+
+@app.get("/ip")
+async def ip():
+    return PlainTextResponse("192.168.0.3\n10.0.0.0/8\n127.0.0.1/32")
+
+
+@app.get("/rdns")
+async def rdns():
+    return PlainTextResponse(".example.com\n.example.org\n.bw-services")
+
+
+@app.get("/asn")
+async def asn():
+    return PlainTextResponse("1234\n13335\n5678")
+
+
+@app.get("/user_agent")
+async def user_agent():
+    return PlainTextResponse("BunkerBot\nCensysInspect\nShodanInspect\nZmEu\nmasscan")
+
+
+@app.get("/uri")
+async def uri():
+    return PlainTextResponse("/admin\n/login")

@@ -0,0 +1,2 @@
+fastapi==0.95.1
+uvicorn[standard]==0.22.0

@@ -0,0 +1,9 @@
+version: "3.5"
+
+services:
+  init:
+    build: init
+    environment:
+      PYTHONUNBUFFERED: "1"
+    volumes:
+      - ./init/output:/output

@ -0,0 +1,72 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
global-tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:1.0.0.2"
|
||||
networks:
|
||||
bw-global-network:
|
||||
ipv4_address: 1.0.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
||||
bw-global-network:
|
||||
external: true
|
|
@ -0,0 +1,102 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BLACKLIST settings
|
||||
USE_BLACKLIST: "yes"
|
||||
BLACKLIST_IP: ""
|
||||
BLACKLIST_IP_URLS: ""
|
||||
BLACKLIST_RDNS_GLOBAL: "yes"
|
||||
BLACKLIST_RDNS: ""
|
||||
BLACKLIST_RDNS_URLS: ""
|
||||
BLACKLIST_ASN: ""
|
||||
BLACKLIST_ASN_URLS: ""
|
||||
BLACKLIST_USER_AGENT: ""
|
||||
BLACKLIST_USER_AGENT_URLS: ""
|
||||
BLACKLIST_URI: ""
|
||||
BLACKLIST_URI_URLS: ""
|
||||
BLACKLIST_IGNORE_IP: ""
|
||||
BLACKLIST_IGNORE_IP_URLS: ""
|
||||
BLACKLIST_IGNORE_RDNS: ""
|
||||
BLACKLIST_IGNORE_RDNS_URLS: ""
|
||||
BLACKLIST_IGNORE_ASN: ""
|
||||
BLACKLIST_IGNORE_ASN_URLS: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT: ""
|
||||
BLACKLIST_IGNORE_USER_AGENT_URLS: ""
|
||||
BLACKLIST_IGNORE_URI: ""
|
||||
BLACKLIST_IGNORE_URI_URLS: ""
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
bw-global-network:
|
||||
ipv4_address: 1.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
volumes:
|
||||
- bw-data:/data
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
blacklist-api:
|
||||
build: api
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
volumes:
|
||||
bw-data:
|
||||
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-global-network:
|
||||
name: bw-global-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 1.0.0.0/8
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@@ -0,0 +1,14 @@
+FROM python:3.11.3-alpine
+
+WORKDIR /tmp
+
+COPY requirements.txt .
+
+RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
+    rm -f requirements.txt
+
+WORKDIR /opt/blacklist_init
+
+COPY main.py .
+
+ENTRYPOINT [ "python3", "main.py" ]

@@ -0,0 +1,33 @@
+from datetime import date
+from gzip import GzipFile
+from io import BytesIO
+from pathlib import Path
+from maxminddb import MODE_FD, open_database
+from requests import get
+
+# Compute the mmdb URL
+mmdb_url = f"https://download.db-ip.com/free/dbip-asn-lite-{date.today().strftime('%Y-%m')}.mmdb.gz"
+
+# Download the mmdb file in memory
+print(f"Downloading mmdb file from url {mmdb_url} ...", flush=True)
+file_content = BytesIO()
+with get(mmdb_url, stream=True) as resp:
+    resp.raise_for_status()
+    for chunk in resp.iter_content(chunk_size=4 * 1024):
+        if chunk:
+            file_content.write(chunk)
+file_content.seek(0)
+
+with open_database(GzipFile(fileobj=file_content, mode="rb"), mode=MODE_FD) as reader:
+    dbip_asn = reader.get("1.0.0.3")
+
+if not dbip_asn:
+    print(f"❌ Error while reading mmdb file from {mmdb_url}", flush=True)
+    exit(1)
+
+print(
+    f"✅ ASN for IP 1.0.0.3 is {dbip_asn['autonomous_system_number']}, saving it to /output/ip_asn.txt",
+    flush=True,
+)
+
+Path("/output/ip_asn.txt").write_text(str(dbip_asn["autonomous_system_number"]))

@@ -0,0 +1,2 @@
+maxminddb==2.3.0
+requests==2.30.0

@ -0,0 +1,212 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400 or status_code == 403
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_blacklist = getenv("USE_BLACKLIST", "yes") == "yes"
|
||||
|
||||
blacklist_ip = getenv("BLACKLIST_IP", "")
|
||||
blacklist_ip_urls = getenv("BLACKLIST_IP_URLS", "")
|
||||
blacklist_rdns_global = getenv("BLACKLIST_RDNS_GLOBAL", "yes") == "yes"
|
||||
blacklist_rdns = getenv("BLACKLIST_RDNS", "")
|
||||
blacklist_rdns_urls = getenv("BLACKLIST_RDNS_URLS", "")
|
||||
blacklist_asn = getenv("BLACKLIST_ASN", "")
|
||||
blacklist_asn_urls = getenv("BLACKLIST_ASN_URLS", "")
|
||||
blacklist_user_agent = getenv("BLACKLIST_USER_AGENT", "")
|
||||
blacklist_user_agent_urls = getenv("BLACKLIST_USER_AGENT_URLS", "")
|
||||
blacklist_uri = getenv("BLACKLIST_URI", "")
|
||||
blacklist_uri_urls = getenv("BLACKLIST_URI_URLS", "")
|
||||
|
||||
blacklist_ignore_ip = getenv("BLACKLIST_IGNORE_IP", "")
|
||||
blacklist_ignore_ip_urls = getenv("BLACKLIST_IGNORE_IP_URLS", "")
|
||||
blacklist_ignore_rdns = getenv("BLACKLIST_IGNORE_RDNS", "")
|
||||
blacklist_ignore_rdns_urls = getenv("BLACKLIST_IGNORE_RDNS_URLS", "")
|
||||
blacklist_ignore_asn = getenv("BLACKLIST_IGNORE_ASN", "")
|
||||
blacklist_ignore_asn_urls = getenv("BLACKLIST_IGNORE_ASN_URLS", "")
|
||||
blacklist_ignore_user_agent = getenv("BLACKLIST_IGNORE_USER_AGENT", "")
|
||||
blacklist_ignore_user_agent_urls = getenv("BLACKLIST_IGNORE_USER_AGENT_URLS", "")
|
||||
blacklist_ignore_uri = getenv("BLACKLIST_IGNORE_URI", "")
|
||||
blacklist_ignore_uri_urls = getenv("BLACKLIST_IGNORE_URI_URLS", "")
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to http://www.example.com/admin with User-Agent: BunkerBot ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com/admin",
|
||||
headers={"Host": "www.example.com", "User-Agent": "BunkerBot"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
if not use_blacklist:
|
||||
print(
|
||||
"❌ The request was rejected, but the blacklist is disabled, exiting ..."
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_rdns_global and (
|
||||
blacklist_rdns != "" or blacklist_rdns_urls != ""
|
||||
):
|
||||
print(
|
||||
"❌ Blacklist's RDNS global didn't work as expected, exiting ...",
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_ip != "":
|
||||
print("❌ Blacklist's ignore IP didn't work as expected, exiting ...")
|
||||
exit(1)
|
||||
elif blacklist_ignore_ip_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore IP urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_rdns != "":
|
||||
print("❌ Blacklist's ignore RDNS didn't work as expected, exiting ...")
|
||||
exit(1)
|
||||
elif blacklist_ignore_rdns_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore RDNS urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_asn != "":
|
||||
print("❌ Blacklist's ignore ASN didn't work as expected, exiting ...")
|
||||
exit(1)
|
||||
elif blacklist_ignore_asn_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore ASN urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_user_agent != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore user agent didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_user_agent_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore user agent urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ignore_uri != "":
|
||||
print("❌ Blacklist's ignore URI didn't work as expected, exiting ...")
|
||||
exit(1)
|
||||
elif blacklist_ignore_uri_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's ignore URI urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_ip != "" and not any(
|
||||
[blacklist_ignore_ip, blacklist_ignore_ip_urls, not use_blacklist]
|
||||
):
|
||||
print("❌ Blacklist's IP didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_ip_urls != "":
|
||||
print("❌ Blacklist's IP urls didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_rdns != "" and not any(
|
||||
[
|
||||
blacklist_ignore_rdns,
|
||||
blacklist_ignore_rdns_urls,
|
||||
blacklist_rdns_global,
|
||||
]
|
||||
):
|
||||
print("❌ Blacklist's RDNS didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_rdns_urls != "" and blacklist_rdns_global:
|
||||
print(
|
||||
"❌ Blacklist's RDNS urls didn't work as expected, exiting ...", flush=True
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_asn != "" and not any(
|
||||
[blacklist_ignore_asn, blacklist_ignore_asn_urls]
|
||||
):
|
||||
print("❌ Blacklist's ASN didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_asn_urls != "":
|
||||
print("❌ Blacklist's ASN urls didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_user_agent != "" and not any(
|
||||
[blacklist_ignore_user_agent, blacklist_ignore_user_agent_urls]
|
||||
):
|
||||
print(
|
||||
"❌ Blacklist's User Agent didn't work as expected, exiting ...", flush=True
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_user_agent_urls != "":
|
||||
print(
|
||||
"❌ Blacklist's User Agent urls didn't work as expected, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif blacklist_uri != "" and not any(
|
||||
[blacklist_ignore_uri, blacklist_ignore_uri_urls]
|
||||
):
|
||||
print("❌ Blacklist's URI didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif blacklist_uri_urls != "":
|
||||
print("❌ Blacklist's URI urls didn't work as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif use_blacklist and not any(
|
||||
[
|
||||
blacklist_ip,
|
||||
blacklist_ip_urls,
|
||||
blacklist_rdns,
|
||||
blacklist_rdns_urls,
|
||||
blacklist_asn,
|
||||
blacklist_asn_urls,
|
||||
blacklist_user_agent,
|
||||
blacklist_user_agent_urls,
|
||||
blacklist_uri,
|
||||
blacklist_uri_urls,
|
||||
blacklist_ignore_ip,
|
||||
blacklist_ignore_ip_urls,
|
||||
blacklist_ignore_rdns,
|
||||
blacklist_ignore_rdns_urls,
|
||||
blacklist_ignore_asn,
|
||||
blacklist_ignore_asn_urls,
|
||||
blacklist_ignore_user_agent,
|
||||
blacklist_ignore_user_agent_urls,
|
||||
blacklist_ignore_uri,
|
||||
blacklist_ignore_uri_urls,
|
||||
]
|
||||
):
|
||||
print("❌ Blacklist is disabled, it shouldn't be ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print("✅ Blacklist is working as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,258 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🏴 Building blacklist stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Building custom api image ..."
|
||||
docker compose build blacklist-api
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Building tests images ..."
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
as_number=0
|
||||
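# Revert the docker-compose files to their default blacklist settings, then tear the stack down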
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
rm -rf init/output
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "no"@USE_BLACKLIST: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: "0.0.0.0/0"@BLACKLIST_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: "192.168.0.3"@BLACKLIST_IGNORE_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IGNORE_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "no"@BLACKLIST_RDNS_GLOBAL: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ".bw-services"@BLACKLIST_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ".bw-services"@BLACKLIST_IGNORE_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_IGNORE_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: "[0-9]*"@BLACKLIST_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: "[0-9]*"@BLACKLIST_IGNORE_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_IGNORE_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: "BunkerBot"@BLACKLIST_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@BLACKLIST_IGNORE_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: "/admin"@BLACKLIST_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: "/admin"@BLACKLIST_IGNORE_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_URI_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_IGNORE_URI_URLS: ""@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🏴 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏴 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
echo "🏴 Initializing workspace ..."
|
||||
rm -rf init/output
|
||||
mkdir -p init/output
|
||||
docker compose -f docker-compose.init.yml up --build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Build failed ❌"
|
||||
exit 1
|
||||
elif ! [[ -f "init/output/ip_asn.txt" ]]; then
|
||||
echo "🏴 ip_asn.txt not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
as_number=$(cat init/output/ip_asn.txt)
|
||||
|
||||
if [[ $as_number = "" ]]; then
|
||||
echo "🏴 AS number not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf init/output
|
||||
|
||||
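# Each iteration toggles a single blacklist setting in the compose files via sed, restarts the stack and runs the tests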
for test in "ip" "deactivated" "ignore_ip" "ignore_ip_urls" "ip_urls" "rdns" "rdns_global" "ignore_rdns" "ignore_rdns_urls" "rdns_urls" "asn" "ignore_asn" "ignore_asn_urls" "asn_urls" "user_agent" "ignore_user_agent" "ignore_user_agent_urls" "user_agent_urls" "uri" "ignore_uri" "ignore_uri_urls" "uri_urls"
|
||||
do
|
||||
if [ "$test" = "ip" ] ; then
|
||||
echo "🏴 Running tests with the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: ""@BLACKLIST_IP: "0.0.0.0/0"@' {} \;
|
||||
elif [ "$test" = "deactivated" ] ; then
|
||||
echo "🏴 Running tests when deactivating the blacklist ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "yes"@USE_BLACKLIST: "no"@' {} \;
|
||||
elif [ "$test" = "ignore_ip" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip set to 192.168.0.3 ..."
|
||||
echo "ℹ️ Keeping the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BLACKLIST: "no"@USE_BLACKLIST: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: ""@BLACKLIST_IGNORE_IP: "192.168.0.3"@' {} \;
|
||||
elif [ "$test" = "ignore_ip_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip_urls set to http://blacklist-api:8080/ip ..."
|
||||
echo "ℹ️ Keeping the network 0.0.0.0/0 in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP: "192.168.0.3"@BLACKLIST_IGNORE_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: ""@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@' {} \;
|
||||
elif [ "$test" = "ip_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ip url set to http://blacklist-api:8080/ip ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IGNORE_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP: "0.0.0.0/0"@BLACKLIST_IP: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: ""@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@' {} \;
|
||||
elif [ "$test" = "rdns" ] ; then
|
||||
echo "🏴 Running tests with blacklist's rdns set to .bw-services ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IP_URLS: "http://blacklist-api:8080/ip"@BLACKLIST_IP_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ""@BLACKLIST_RDNS: ".bw-services"@' {} \;
|
||||
elif [ "$test" = "rdns_global" ] ; then
|
||||
echo "🏴 Running tests when blacklist's rdns also scans local ip addresses ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "yes"@BLACKLIST_RDNS_GLOBAL: "no"@' {} \;
|
||||
elif [ "$test" = "ignore_rdns" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_rdns set to .bw-services ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
echo "ℹ️ Keeping the rdns .bw-services in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ""@BLACKLIST_IGNORE_RDNS: ".bw-services"@' {} \;
|
||||
elif [ "$test" = "ignore_rdns_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_rdns_urls set to http://blacklist-api:8080/rdns ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
echo "ℹ️ Keeping the rdns .bw-services in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS: ".bw-services"@BLACKLIST_IGNORE_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: ""@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@' {} \;
|
||||
elif [ "$test" = "rdns_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's rdns url set to http://blacklist-api:8080/rdns ..."
|
||||
echo "ℹ️ Keeping the rdns also scanning local ip addresses ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_IGNORE_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS: ".bw-services"@BLACKLIST_RDNS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: ""@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@' {} \;
|
||||
elif [ "$test" = "asn" ] ; then
|
||||
echo "🏴 Running tests with blacklist's asn set to $as_number ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_GLOBAL: "no"@BLACKLIST_RDNS_GLOBAL: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_RDNS_URLS: "http://blacklist-api:8080/rdns"@BLACKLIST_RDNS_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: ""@BLACKLIST_ASN: "'"$as_number"'"@' {} \;
|
||||
elif [ "$test" = "ignore_asn" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_asn set to $as_number ..."
|
||||
echo "ℹ️ Keeping the asn $as_number in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: ""@BLACKLIST_IGNORE_ASN: "'"$as_number"'"@' {} \;
|
||||
elif [ "$test" = "ignore_asn_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_asn_urls set to http://blacklist-api:8080/asn ..."
|
||||
echo "ℹ️ Keeping the asn $as_number in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN: "'"$as_number"'"@BLACKLIST_IGNORE_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: ""@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@' {} \;
|
||||
elif [ "$test" = "asn_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's asn url set to http://blacklist-api:8080/asn ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_IGNORE_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN: "'"$as_number"'"@BLACKLIST_ASN: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: ""@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@' {} \;
|
||||
elif [ "$test" = "user_agent" ] ; then
|
||||
echo "🏴 Running tests with blacklist's user_agent set to BunkerBot ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_ASN_URLS: "http://blacklist-api:8080/asn"@BLACKLIST_ASN_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: ""@BLACKLIST_USER_AGENT: "BunkerBot"@' {} \;
|
||||
elif [ "$test" = "ignore_user_agent" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_user_agent set to BunkerBot ..."
|
||||
echo "ℹ️ Keeping the user_agent BunkerBot in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: ""@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@' {} \;
|
||||
elif [ "$test" = "ignore_user_agent_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_user_agent_urls set to http://blacklist-api:8080/user_agent ..."
|
||||
echo "ℹ️ Keeping the user_agent BunkerBot in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT: "BunkerBot"@BLACKLIST_IGNORE_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@' {} \;
|
||||
elif [ "$test" = "user_agent_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's user_agent url set to http://blacklist-api:8080/user_agent ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_IGNORE_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT: "BunkerBot"@BLACKLIST_USER_AGENT: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: ""@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@' {} \;
|
||||
elif [ "$test" = "uri" ] ; then
|
||||
echo "🏴 Running tests with blacklist's uri set to /admin ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_USER_AGENT_URLS: "http://blacklist-api:8080/user_agent"@BLACKLIST_USER_AGENT_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: ""@BLACKLIST_URI: "/admin"@' {} \;
|
||||
elif [ "$test" = "ignore_uri" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_uri set to /admin ..."
|
||||
echo "ℹ️ Keeping the uri /admin in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: ""@BLACKLIST_IGNORE_URI: "/admin"@' {} \;
|
||||
elif [ "$test" = "ignore_uri_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's ignore_ip_urls set to http://blacklist-api:8080/uri ..."
|
||||
echo "ℹ️ Keeping the uri /admin in the ban list ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI: "/admin"@BLACKLIST_IGNORE_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: ""@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@' {} \;
|
||||
elif [ "$test" = "uri_urls" ] ; then
|
||||
echo "🏴 Running tests with blacklist's uri url set to http://blacklist-api:8080/uri ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_IGNORE_URI_URLS: "http://blacklist-api:8080/uri"@BLACKLIST_IGNORE_URI_URLS: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI: "/admin"@BLACKLIST_URI: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_URI_URLS: ""@BLACKLIST_URI_URLS: "http://blacklist-api:8080/uri"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🏴 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🏴 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("blacklist-bw-1" "blacklist-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🏴 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🏴 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
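# ASN-related tests go through the global-tests service (presumably a client with a non-private address, so the ASN check applies)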
if [[ "$test" = "asn" || "$test" = "ignore_asn" || "$test" = "ignore_asn_urls" || "$test" = "asn_urls" ]] ; then
|
||||
docker compose -f docker-compose.test.yml up global-tests --abort-on-container-exit --exit-code-from global-tests 2>/dev/null
|
||||
else
|
||||
docker compose -f docker-compose.test.yml up tests --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
fi
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb, BunkerWeb Scheduler and Custom API logs ..."
|
||||
docker compose logs bw bw-scheduler blacklist-api
|
||||
exit 1
|
||||
else
|
||||
echo "🏴 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🏴 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,17 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BROTLI: "no"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,67 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BROTLI settings
|
||||
USE_BROTLI: "no"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
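# Read-only proxy to the Docker socket so the scheduler can list containers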
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,62 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import RequestException, get, head
|
||||
from traceback import format_exc
|
||||
from time import sleep
|
||||
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_brotli = getenv("USE_BROTLI", "no") == "yes"
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a HEAD request to http://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
response = head(
|
||||
"http://www.example.com",
|
||||
headers={"Host": "www.example.com", "Accept-Encoding": "br"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
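# With Brotli disabled the response must not be Brotli-encoded; with it enabled it must be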
if not use_brotli and response.headers.get("Content-Encoding", "").lower() == "br":
|
||||
print(
|
||||
f"❌ Content-Encoding header is present even if Brotli is deactivated, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
elif use_brotli and response.headers.get("Content-Encoding", "").lower() != "br":
|
||||
print(
|
||||
f"❌ Content-Encoding header is not present or with the wrong value even if Brotli is activated, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Brotli is working as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,106 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "📦 Building brotli stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker app1
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📦 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📦 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BROTLI: "yes"@USE_BROTLI: "no"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "📦 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📦 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "📦 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "activated"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "📦 Running tests without brotli ..."
|
||||
elif [ "$test" = "activated" ] ; then
|
||||
echo "📦 Running tests with brotli ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BROTLI: "no"@USE_BROTLI: "yes"@' {} \;
|
||||
fi
|
||||
|
||||
echo "📦 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📦 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "📦 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("brotli-bw-1" "brotli-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "📦 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "📦 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📦 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "📦 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "📦 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/blacklist_api
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--proxy-headers", "--forwarded-allow-ips", "\"*\"" ]
|
|
@ -0,0 +1,46 @@
|
|||
from uuid import uuid4
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
|
||||
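# Minimal mock of the BunkerNet API: it records registrations and reports so the tests can assert on them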
app = FastAPI()
|
||||
instance_id = None
|
||||
report_num = 0
|
||||
|
||||
|
||||
@app.get("/ping")
|
||||
async def ping(_: Request):
|
||||
return JSONResponse(status_code=200, content={"result": "ok", "data": "pong"})
|
||||
|
||||
|
||||
@app.post("/register")
|
||||
async def register(_: Request):
|
||||
global instance_id
|
||||
instance_id = str(uuid4())
|
||||
return JSONResponse(status_code=200, content={"result": "ok", "data": instance_id})
|
||||
|
||||
|
||||
@app.post("/report")
|
||||
async def report(_: Request):
|
||||
global report_num
|
||||
report_num += 1
|
||||
return JSONResponse(
|
||||
status_code=200, content={"result": "ok", "data": "Report acknowledged."}
|
||||
)
|
||||
|
||||
|
||||
@app.get("/db")
|
||||
async def db(_: Request):
|
||||
return JSONResponse(status_code=200, content={"result": "ok", "data": []})
|
||||
|
||||
|
||||
@app.get("/instance_id")
|
||||
async def get_instance_id(_: Request):
|
||||
global instance_id
|
||||
return JSONResponse(status_code=200, content={"result": "ok", "data": instance_id})
|
||||
|
||||
|
||||
@app.get("/report_num")
|
||||
async def get_report_num(_: Request):
|
||||
global report_num
|
||||
return JSONResponse(status_code=200, content={"result": "ok", "data": report_num})
|
|
@ -0,0 +1,2 @@
|
|||
fastapi==0.95.1
|
||||
uvicorn[standard]==0.22.0
|
|
@ -0,0 +1,18 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_BUNKERNET: "yes"
|
||||
BUNKERNET_SERVER: "http://bunkernet-api:8080"
|
||||
extra_hosts:
|
||||
- "www.example.com:1.0.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,68 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? BUNKERNET settings
|
||||
USE_BUNKERNET: "yes"
|
||||
BUNKERNET_SERVER: "http://bunkernet-api:8080"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
bunkernet-api:
|
||||
build: api
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 1.0.0.4
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 1.0.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,80 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_bunkernet = getenv("USE_BUNKERNET", "yes") == "yes"
|
||||
bunkernet_server = getenv("BUNKERNET_SERVER")
|
||||
|
||||
if not bunkernet_server:
|
||||
print("❌ BunkerNet server not specified, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
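# Ask the mock API whether the BunkerNet plugin registered an instance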
instance_id = get(f"{bunkernet_server}/instance_id").json()["data"]
|
||||
|
||||
if use_bunkernet and not instance_id:
|
||||
print("❌ BunkerNet plugin did not register, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not use_bunkernet and instance_id:
|
||||
print("❌ BunkerNet plugin registered but it shouldn't, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not use_bunkernet and not instance_id:
|
||||
print("✅ BunkerNet plugin is disabled and not registered ...", flush=True)
|
||||
exit(0)
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to http://www.example.com/?id=/etc/passwd ...", flush=True
|
||||
)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com/?id=/etc/passwd",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
print(f"ℹ️ Status code: {status_code}", flush=True)
|
||||
|
||||
if status_code != 403:
|
||||
print("❌ The request was not blocked, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
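# Give the plugin a moment to forward the report to the mock API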
sleep(2)
|
||||
|
||||
report_num = get(f"{bunkernet_server}/report_num").json()["data"]
|
||||
|
||||
if report_num < 1:
|
||||
print("❌ The report was not sent, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print("✅ BunkerNet is working as expected ...", flush=True)
|
||||
except SystemExit as e:
|
||||
exit(e.code)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,115 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🕸️ Building bunkernet stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🕸️ Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🕸️ Building custom api image ..."
|
||||
docker compose build bunkernet-api
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🕸️ Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🕸️ Building tests images ..."
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🕸️ Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BUNKERNET: "no"@USE_BUNKERNET: "yes"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🕸️ Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🕸️ Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🕸️ Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "activated" "deactivated"
|
||||
do
|
||||
if [ "$test" = "activated" ] ; then
|
||||
echo "🕸️ Running tests with bunkernet activated ..."
|
||||
elif [ "$test" = "deactivated" ] ; then
|
||||
echo "🕸️ Running tests without bunkernet ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_BUNKERNET: "yes"@USE_BUNKERNET: "no"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🕸️ Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🕸️ Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🕸️ Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("bunkernet-bw-1" "bunkernet-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🕸️ Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🕸️ Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🏴 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb, BunkerWeb Scheduler and Custom API logs ..."
|
||||
docker compose logs bw bw-scheduler bunkernet-api
|
||||
exit 1
|
||||
else
|
||||
echo "🏴 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🕸️ Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,20 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_CLIENT_CACHE: "no"
|
||||
CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
|
||||
CLIENT_CACHE_ETAG: "yes"
|
||||
CLIENT_CACHE_CONTROL: "public, max-age=15552000"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,65 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
ports:
|
||||
- 80:80
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./image.png:/var/www/html/image.png
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? CLIENT_CACHE settings
|
||||
USE_CLIENT_CACHE: "no"
|
||||
CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
|
||||
CLIENT_CACHE_ETAG: "yes"
|
||||
CLIENT_CACHE_CONTROL: "public, max-age=15552000"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
Binary file not shown (image.png, 30 KiB).
|
@ -0,0 +1,89 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import RequestException, get
|
||||
from traceback import format_exc
|
||||
from time import sleep
|
||||
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com/image.png", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
use_client_cache = getenv("USE_CLIENT_CACHE", "no") == "yes"
|
||||
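# True while CLIENT_CACHE_EXTENSIONS is still the default list (which includes png, the extension requested below)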
default_cache_extensions = (
|
||||
getenv(
|
||||
"CLIENT_CACHE_EXTENSIONS",
|
||||
"jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2",
|
||||
)
|
||||
== "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"
|
||||
)
|
||||
client_cache_etag = getenv("CLIENT_CACHE_ETAG", "yes") == "yes"
|
||||
client_cache_control = getenv("CLIENT_CACHE_CONTROL", "public, max-age=15552000")
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to http://www.example.com/image.png ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
response = get(
|
||||
"http://www.example.com/image.png", headers={"Host": "www.example.com"}
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
if not use_client_cache:
|
||||
if "Cache-Control" in response.headers:
|
||||
print(
|
||||
f"❌ Cache-Control header is present even if Client cache is deactivated, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
else:
|
||||
if "Cache-Control" not in response.headers and default_cache_extensions:
|
||||
print(
|
||||
f"❌ Cache-Control header is not present even if Client cache is activated, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
elif not default_cache_extensions and "Cache-Control" in response.headers:
|
||||
print(
|
||||
f"❌ Cache-Control header is present even if the png extension is not in the list of extensions, exiting ...\nheaders: {response.headers}",
|
||||
flush=True,
|
||||
)
exit(1)
|
||||
elif not client_cache_etag and "ETag" in response.headers:
|
||||
print(
|
||||
f"❌ ETag header is present even if Client cache ETag is deactivated, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
elif default_cache_extensions and client_cache_control != response.headers.get(
|
||||
"Cache-Control"
|
||||
):
|
||||
print(
|
||||
f"❌ Cache-Control header is not equal to the expected value, exiting ...\nheaders: {response.headers}"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Client cache is working as expected ...", flush=True)
|
||||
except SystemExit:
|
||||
exit(1)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,120 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "📝 Building clientcache stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📝 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📝 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CLIENT_CACHE: "yes"@USE_CLIENT_CACHE: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "no"@CLIENT_CACHE_ETAG: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_CONTROL: "public, max-age=3600"@CLIENT_CACHE_CONTROL: "public, max-age=15552000"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "📝 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📝 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "📝 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "activated" "cache_extensions" "cache_etag" "cache_control"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "📝 Running tests without clientcache ..."
|
||||
elif [ "$test" = "activated" ] ; then
|
||||
echo "📝 Running tests with clientcache ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CLIENT_CACHE: "no"@USE_CLIENT_CACHE: "yes"@' {} \;
|
||||
elif [ "$test" = "cache_extensions" ] ; then
|
||||
echo "📝 Running tests when removing png from the cache extensions ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
|
||||
elif [ "$test" = "cache_etag" ] ; then
|
||||
echo "📝 Running tests when deactivating the etag ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@CLIENT_CACHE_EXTENSIONS: "jpg|jpeg|png|bmp|ico|svg|tif|css|js|otf|ttf|eot|woff|woff2"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "yes"@CLIENT_CACHE_ETAG: "no"@' {} \;
|
||||
elif [ "$test" = "cache_control" ] ; then
|
||||
echo "📝 Running tests whith clientcache control set to public, max-age=3600 ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_ETAG: "no"@CLIENT_CACHE_ETAG: "yes"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CLIENT_CACHE_CONTROL: "public, max-age=15552000"@CLIENT_CACHE_CONTROL: "public, max-age=3600"@' {} \;
|
||||
fi
|
||||
|
||||
echo "📝 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📝 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "📝 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("clientcache-bw-1" "clientcache-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "📝 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "📝 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "📝 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "📝 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "📝 Tests are done ! ✅"
|
|
@ -0,0 +1,25 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
# Install firefox and geckodriver
|
||||
RUN apk add --no-cache --virtual .build-deps curl grep zip && \
|
||||
apk add --no-cache firefox
|
||||
|
||||
# Installing geckodriver for firefox...
|
||||
RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases/latest | grep -Po 'v[0-9]+.[0-9]+.[0-9]+'` && \
|
||||
wget -O geckodriver.tar.gz https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-linux64.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf geckodriver.tar.gz && \
|
||||
chmod +x /usr/local/bin/geckodriver && \
|
||||
rm geckodriver.tar.gz
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,23 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_CORS: "no"
|
||||
CORS_ALLOW_ORIGIN: "*"
|
||||
CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"
|
||||
CORS_MAX_AGE: "86400"
|
||||
CORS_ALLOW_CREDENTIALS: "no"
|
||||
CORS_ALLOW_METHODS: "GET, POST, OPTIONS"
|
||||
CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,69 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
HTTP_PORT: "80"
|
||||
HTTPS_PORT: "443"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
GENERATE_SELF_SIGNED_SSL: "yes"
|
||||
ALLOWED_METHODS: "GET|POST|HEAD|OPTIONS"
|
||||
|
||||
# ? CORS settings
|
||||
USE_CORS: "no"
|
||||
CORS_ALLOW_ORIGIN: "*"
|
||||
CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"
|
||||
CORS_MAX_AGE: "86400"
|
||||
CORS_ALLOW_CREDENTIALS: "no"
|
||||
CORS_ALLOW_METHODS: "GET, POST, OPTIONS"
|
||||
CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
|
@ -0,0 +1,220 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import RequestException, get, head, options
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.common.exceptions import JavascriptException
|
||||
from traceback import format_exc
|
||||
from time import sleep
|
||||
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"https://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
verify=False,
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
firefox_options = Options()
|
||||
firefox_options.add_argument("--headless")
|
||||
|
||||
use_cors = getenv("USE_CORS", "no")
|
||||
cors_allow_origin = getenv("CORS_ALLOW_ORIGIN", "*")
|
||||
cors_expose_headers = getenv("CORS_EXPOSE_HEADERS", "Content-Length,Content-Range")
|
||||
cors_max_age = getenv("CORS_MAX_AGE", "86400")
|
||||
cors_allow_credentials = getenv("CORS_ALLOW_CREDENTIALS", "no") == "yes"
|
||||
cors_allow_methods = getenv("CORS_ALLOW_METHODS", "GET, POST, OPTIONS")
|
||||
cors_allow_headers = getenv(
|
||||
"CORS_ALLOW_HEADERS",
|
||||
"DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range",
|
||||
)
|
||||
|
||||
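# Simple-request checks: only run when a non-preflight CORS setting was overridden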
if any(
|
||||
[
|
||||
cors_allow_origin != "*",
|
||||
cors_expose_headers != "Content-Length,Content-Range",
|
||||
]
|
||||
):
|
||||
print(
|
||||
"ℹ️ Sending a HEAD request to https://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
response = head(
|
||||
"https://www.example.com", headers={"Host": "www.example.com"}, verify=False
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
if any(
|
||||
header in response.headers
|
||||
for header in (
|
||||
"Access-Control-Max-Age",
|
||||
"Access-Control-Allow-Credentials",
|
||||
"Access-Control-Allow-Methods",
|
||||
"Access-Control-Allow-Headers",
|
||||
)
|
||||
):
|
||||
print(
|
||||
f"❌ One of the preflight request headers is present in the response headers, it should not be ...\nheaders: {response.headers}",
|
||||
)
|
||||
exit(1)
|
||||
elif cors_allow_origin != response.headers.get("Access-Control-Allow-Origin"):
|
||||
print(
|
||||
f"❌ The Access-Control-Allow-Origin header is set to {response.headers.get('Access-Control-Allow-Origin', 'missing')}, it should be {cors_allow_origin} ...\nheaders: {response.headers}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif cors_allow_origin != "*":
|
||||
print(
|
||||
f"✅ The Access-Control-Allow-Origin header is set to {cors_allow_origin} ...",
|
||||
flush=True,
|
||||
)
|
||||
elif cors_expose_headers != response.headers.get(
|
||||
"Access-Control-Expose-Headers"
|
||||
):
|
||||
print(
|
||||
f"❌ The Access-Control-Expose-Headers header is set to {response.headers.get('Access-Control-Expose-Headers', 'missing')}, it should be {cors_expose_headers} ...\nheaders: {response.headers}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif cors_expose_headers != "Content-Length,Content-Range":
|
||||
print(
|
||||
f"✅ The Access-Control-Expose-Headers header is set to {cors_expose_headers} ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
exit(0)
|
||||
elif any(
|
||||
[
|
||||
cors_max_age != "86400",
|
||||
cors_allow_credentials,
|
||||
cors_allow_methods != "GET, POST, OPTIONS",
|
||||
cors_allow_headers
|
||||
!= "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range",
|
||||
]
|
||||
):
|
||||
print(
|
||||
"ℹ️ Sending a preflight request to https://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
response = options(
|
||||
"https://www.example.com", headers={"Host": "www.example.com"}, verify=False
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
if (
|
||||
not cors_allow_credentials
|
||||
and "Access-Control-Allow-Credentials" in response.headers
|
||||
):
|
||||
print(
|
||||
f"❌ The Access-Control-Allow-Credentials header is present in the response headers while the setting CORS_ALLOW_CREDENTIALS is set to {cors_allow_credentials}, it should not be ...\nheaders: {response.headers}",
|
||||
)
|
||||
exit(1)
|
||||
elif cors_max_age != response.headers.get("Access-Control-Max-Age"):
|
||||
print(
|
||||
f"❌ The Access-Control-Max-Age header is set to {response.headers.get('Access-Control-Max-Age', 'missing')}, it should be {cors_max_age} ...\nheaders: {response.headers}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif cors_max_age != "86400":
|
||||
print(
|
||||
f"✅ The Access-Control-Max-Age header is set to {cors_max_age} ...",
|
||||
flush=True,
|
||||
)
|
||||
elif (
|
||||
cors_allow_credentials
|
||||
and "Access-Control-Allow-Credentials" not in response.headers
|
||||
):
|
||||
print(
|
||||
f"❌ The Access-Control-Allow-Credentials header is not present in the response headers while the setting CORS_ALLOW_CREDENTIALS is set to {cors_allow_credentials}, it should be ...\nheaders: {response.headers}",
|
||||
)
|
||||
exit(1)
|
||||
elif cors_allow_methods != response.headers.get("Access-Control-Allow-Methods"):
|
||||
print(
|
||||
f"❌ The Access-Control-Allow-Methods header is set to {response.headers.get('Access-Control-Allow-Methods', 'missing')}, it should be {cors_allow_methods} ...\nheaders: {response.headers}",
|
||||
)
|
||||
exit(1)
|
||||
elif cors_allow_methods != "GET, POST, OPTIONS":
|
||||
print(
|
||||
f"✅ The Access-Control-Allow-Methods is set to {cors_allow_methods} ...",
|
||||
flush=True,
|
||||
)
|
||||
elif cors_allow_headers != response.headers.get("Access-Control-Allow-Headers"):
|
||||
print(
|
||||
f"❌ The Access-Control-Allow-Headers header is set to {response.headers.get('Access-Control-Allow-Headers', 'missing')}, it should be {cors_allow_headers} ...\nheaders: {response.headers}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif (
|
||||
cors_allow_headers
|
||||
!= "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"
|
||||
):
|
||||
print(
|
||||
f"✅ The Access-Control-Allow-Headers header is set to {cors_allow_headers} ...",
|
||||
flush=True,
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"✅ The Access-Control-Allow-Credentials header is present and set to {cors_allow_credentials} ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
exit(0)
|
||||
|
||||
print("ℹ️ Starting Firefox ...", flush=True)
|
||||
with webdriver.Firefox(options=firefox_options) as driver:
|
||||
driver.delete_all_cookies()
|
||||
driver.maximize_window()
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a javascript request to https://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
error = False
|
||||
|
||||
try:
|
||||
driver.execute_script(
|
||||
"""var xhttp = new XMLHttpRequest();
|
||||
xhttp.open("GET", "https://www.example.com", false);
|
||||
xhttp.setRequestHeader("Host", "www.example.com");
|
||||
xhttp.send();"""
|
||||
)
|
||||
except JavascriptException as e:
|
||||
if not f"{e}".startswith("Message: NetworkError"):
|
||||
print(f"❌ {e}", flush=True)
|
||||
error = True
|
||||
|
||||
if use_cors == "no" and not error:
|
||||
print("❌ CORS is enabled, it shouldn't be, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif use_cors == "yes" and error:
|
||||
print("❌ CORS are not working as expected, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
print("✅ CORS are working as expected ...", flush=True)
|
||||
except SystemExit as e:
|
||||
exit(e.code)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1,2 @@
|
|||
requests==2.30.0
|
||||
selenium==4.9.1
|
|
@ -0,0 +1,135 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🛰️ Building cors stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CORS: "yes"@USE_CORS: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "http://www.example.com"@CORS_ALLOW_ORIGIN: "\*"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "X-Test"@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "3600"@CORS_MAX_AGE: "86400"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "yes"@CORS_ALLOW_CREDENTIALS: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_HEADERS: "X-Test"@CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🛰️ Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🛰️ Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "activated" "allow_origin" "expose_headers" "max_age" "allow_credentials" "allow_methods" "allow_headers"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🛰️ Running tests without cors ..."
|
||||
elif [ "$test" = "activated" ] ; then
|
||||
echo "🛰️ Running tests with cors ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CORS: "no"@USE_CORS: "yes"@' {} \;
|
||||
elif [ "$test" = "allow_origin" ] ; then
|
||||
echo "🛰️ Running tests with cors allow origin set to http://www.example.com ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "\*"@CORS_ALLOW_ORIGIN: "http://www.example.com"@' {} \;
|
||||
elif [ "$test" = "expose_headers" ] ; then
|
||||
echo "🛰️ Running tests with cors expose headers set to X-Test ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_ORIGIN: "http://www.example.com"@CORS_ALLOW_ORIGIN: "\*"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@CORS_EXPOSE_HEADERS: "X-Test"@' {} \;
|
||||
elif [ "$test" = "max_age" ] ; then
|
||||
echo "🛰️ Running tests with cors max age set to 3600 ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_EXPOSE_HEADERS: "X-Test"@CORS_EXPOSE_HEADERS: "Content-Length,Content-Range"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "86400"@CORS_MAX_AGE: "3600"@' {} \;
|
||||
elif [ "$test" = "allow_credentials" ] ; then
|
||||
echo "🛰️ Running tests with cors allow credentials is set to yes ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_MAX_AGE: "3600"@CORS_MAX_AGE: "86400"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "no"@CORS_ALLOW_CREDENTIALS: "yes"@' {} \;
|
||||
elif [ "$test" = "allow_methods" ] ; then
|
||||
echo "🛰️ Running tests with cors allow methods is set to GET, HEAD, POST, OPTIONS ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_CREDENTIALS: "yes"@CORS_ALLOW_CREDENTIALS: "no"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@' {} \;
|
||||
elif [ "$test" = "allow_headers" ] ; then
|
||||
echo "🛰️ Running tests with cors allow headers is set to X-Test ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_METHODS: "GET, HEAD, POST, OPTIONS"@CORS_ALLOW_METHODS: "GET, POST, OPTIONS"@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@CORS_ALLOW_HEADERS: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range"@CORS_ALLOW_HEADERS: "X-Test"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🛰️ Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🛰️ Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("cors-bw-1" "cors-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🛰️ Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🛰️ Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🛰️ Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🛰️ Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🛰️ Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,34 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests-fr:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
COUNTRY: "FR"
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:2.0.0.2"
|
||||
networks:
|
||||
bw-fr-network:
|
||||
ipv4_address: 2.0.0.3
|
||||
|
||||
tests-us:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
COUNTRY: "US"
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
extra_hosts:
|
||||
- "www.example.com:8.0.0.2"
|
||||
networks:
|
||||
bw-us-network:
|
||||
ipv4_address: 8.0.0.3
|
||||
|
||||
networks:
|
||||
bw-fr-network:
|
||||
external: true
|
||||
bw-us-network:
|
||||
external: true
|
|
@ -0,0 +1,70 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? COUNTRY settings
|
||||
BLACKLIST_COUNTRY: ""
|
||||
WHITELIST_COUNTRY: ""
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-us-network:
|
||||
ipv4_address: 8.0.0.2
|
||||
bw-fr-network:
|
||||
ipv4_address: 2.0.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-us-network:
|
||||
name: bw-us-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 8.0.0.0/8
|
||||
bw-fr-network:
|
||||
name: bw-fr-network
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 2.0.0.0/8
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,75 @@
|
|||
from contextlib import suppress
|
||||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from time import sleep
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
ready = False
|
||||
retries = 0
|
||||
while not ready:
|
||||
with suppress(RequestException):
|
||||
status_code = get(
|
||||
"http://www.example.com", headers={"Host": "www.example.com"}
|
||||
).status_code
|
||||
|
||||
if status_code >= 500:
|
||||
print("❌ An error occurred with the server, exiting ...", flush=True)
|
||||
exit(1)
|
||||
|
||||
ready = status_code < 400 or status_code == 403
|
||||
|
||||
if retries > 10:
|
||||
print("❌ The service took too long to be ready, exiting ...", flush=True)
|
||||
exit(1)
|
||||
elif not ready:
|
||||
retries += 1
|
||||
print(
|
||||
"⚠️ Waiting for the service to be ready, retrying in 5s ...", flush=True
|
||||
)
|
||||
sleep(5)
|
||||
|
||||
country = getenv("COUNTRY")
|
||||
blacklist_country = getenv("BLACKLIST_COUNTRY", "")
|
||||
whitelist_country = getenv("WHITELIST_COUNTRY", "")
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to http://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
status_code = get(
|
||||
f"http://www.example.com",
|
||||
headers={"Host": "www.example.com"},
|
||||
).status_code
|
||||
|
||||
if status_code == 403:
|
||||
if not blacklist_country and not whitelist_country:
|
||||
print(
|
||||
"❌ Got rejected even though there are no country blacklisted or whitelisted, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif country == whitelist_country:
|
||||
print(
|
||||
f"❌ Got rejected even if the current country ({country}) is whitelisted, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Got rejected, as expected ...")
|
||||
else:
|
||||
if country == blacklist_country:
|
||||
print(
|
||||
f"❌ Didn't get rejected even if the current country ({country}) is blacklisted, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Didn't get rejected, as expected ...")
|
||||
except SystemExit as e:
|
||||
exit(e.code)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🌍 Building country stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: "US"@BLACKLIST_COUNTRY: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@WHITELIST_COUNTRY: "FR"@WHITELIST_COUNTRY: ""@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🌍 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🌍 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
for test in "deactivated" "blacklist" "whitelist"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🌍 Running tests without the country plugin ..."
|
||||
elif [ "$test" = "blacklist" ] ; then
|
||||
echo "🌍 Running tests when blacklisting United States ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: ""@BLACKLIST_COUNTRY: "US"@' {} \;
|
||||
elif [ "$test" = "whitelist" ] ; then
|
||||
echo "🌍 Running tests when whitelisting France ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@BLACKLIST_COUNTRY: "US"@BLACKLIST_COUNTRY: ""@' {} \;
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@WHITELIST_COUNTRY: ""@WHITELIST_COUNTRY: "FR"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🌍 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🌍 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("country-bw-1" "country-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🌍 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🌍 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
echo "🌍 Starting the FR container"
|
||||
docker compose -f docker-compose.test.yml up tests-fr --abort-on-container-exit --exit-code-from tests-fr 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Test \"$test\" failed for the FR container ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🌍 Test \"$test\" succeeded for the FR container ✅"
|
||||
fi
|
||||
|
||||
echo "🌍 Starting the US container"
|
||||
docker compose -f docker-compose.test.yml up tests-us --abort-on-container-exit --exit-code-from tests-us 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🌍 Test \"$test\" failed for the US container ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🌍 Test \"$test\" succeeded for the US container ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🌍 Tests are done ! ✅"
|
|
@ -0,0 +1,14 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
COPY main.py .
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,9 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
init:
|
||||
build: init
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
volumes:
|
||||
- ./init/certs:/certs
|
|
@ -0,0 +1,17 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
USE_CUSTOM_SSL: "no"
|
||||
extra_hosts:
|
||||
- "www.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
|
@ -0,0 +1,69 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
volumes:
|
||||
- ./index.html:/var/www/html/index.html
|
||||
environment:
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24 192.168.0.3"
|
||||
HTTP_PORT: "80"
|
||||
HTTPS_PORT: "443"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
LOG_LEVEL: "info"
|
||||
|
||||
# ? CUSTOM_CERT settings
|
||||
USE_CUSTOM_SSL: "no"
|
||||
CUSTOM_SSL_CERT: "/certs/certificate.pem"
|
||||
CUSTOM_SSL_KEY: "/certs/privatekey.key"
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
volumes:
|
||||
- ./init/certs:/certs
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,11 @@
|
|||
FROM alpine
|
||||
|
||||
RUN apk add --no-cache bash openssl
|
||||
|
||||
WORKDIR /opt/init
|
||||
|
||||
COPY entrypoint.sh .
|
||||
|
||||
RUN chmod +x entrypoint.sh
|
||||
|
||||
ENTRYPOINT [ "./entrypoint.sh" ]
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "ℹ️ Generating certificate for www.example.com ..."
|
||||
openssl req -nodes -x509 -newkey rsa:4096 -keyout /certs/privatekey.key -out /certs/certificate.pem -days 365 -subj /CN=www.example.com/
|
||||
|
||||
chown -R root:101 /certs
|
||||
chmod -R 777 /certs
|
|
@ -0,0 +1,49 @@
|
|||
from os import getenv
|
||||
from requests import get
|
||||
from requests.exceptions import RequestException
|
||||
from traceback import format_exc
|
||||
|
||||
try:
|
||||
use_custom_ssl = getenv("USE_CUSTOM_SSL", "no") == "yes"
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to http://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
try:
|
||||
get("http://www.example.com", headers={"Host": "www.example.com"})
|
||||
except RequestException:
|
||||
if not use_custom_ssl:
|
||||
print(
|
||||
"❌ The request failed even though the Custom Cert isn't activated, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not use_custom_ssl:
|
||||
print("✅ The Custom Cert isn't activated, as expected ...", flush=True)
|
||||
exit(0)
|
||||
|
||||
print(
|
||||
"ℹ️ Sending a request to https://www.example.com ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
try:
|
||||
get(
|
||||
"https://www.example.com", headers={"Host": "www.example.com"}, verify=False
|
||||
)
|
||||
except RequestException:
|
||||
print(
|
||||
"❌ The request failed even though the Custom Cert is activated, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ The Custom Cert is activated, as expected ...", flush=True)
|
||||
except SystemExit as e:
|
||||
exit(e.code)
|
||||
except:
|
||||
print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
|
||||
exit(1)
|
|
@ -0,0 +1 @@
|
|||
requests==2.30.0
|
|
@ -0,0 +1,122 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "🔏 Building customcert stack ..."
|
||||
|
||||
# Starting stack
|
||||
docker compose pull bw-docker
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Pull failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
docker compose -f docker-compose.test.yml build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Build failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
manual=0
|
||||
end=0
|
||||
cleanup_stack () {
|
||||
exit_code=$?
|
||||
if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
|
||||
rm -rf init/certs
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CUSTOM_SSL: "yes"@USE_CUSTOM_SSL: "no"@' {} \;
|
||||
if [[ $end -eq 1 && $exit_code = 0 ]] ; then
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🔏 Cleaning up current stack ..."
|
||||
|
||||
docker compose down -v --remove-orphans 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Down failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🔏 Cleaning up current stack done ✅"
|
||||
}
|
||||
|
||||
# Cleanup stack on exit
|
||||
trap cleanup_stack EXIT
|
||||
|
||||
echo "🔏 Initializing workspace ..."
|
||||
rm -rf init/certs
|
||||
mkdir -p init/certs
|
||||
docker compose -f docker-compose.init.yml up --build
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Build failed ❌"
|
||||
exit 1
|
||||
elif ! [[ -f "init/certs/certificate.pem" ]]; then
|
||||
echo "🔏 certificate.pem not found ❌"
|
||||
exit 1
|
||||
elif ! [[ -f "init/certs/privatekey.key" ]]; then
|
||||
echo "🔏 privatekey.key not found ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for test in "deactivated" "activated"
|
||||
do
|
||||
if [ "$test" = "deactivated" ] ; then
|
||||
echo "🔏 Running tests without the custom cert ..."
|
||||
elif [ "$test" = "activated" ] ; then
|
||||
echo "🔏 Running tests with the custom cert activated ..."
|
||||
find . -type f -name 'docker-compose.*' -exec sed -i 's@USE_CUSTOM_SSL: "no"@USE_CUSTOM_SSL: "yes"@' {} \;
|
||||
fi
|
||||
|
||||
echo "🔏 Starting stack ..."
|
||||
docker compose up -d 2>/dev/null
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Up failed ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if stack is healthy
|
||||
echo "🔏 Waiting for stack to be healthy ..."
|
||||
i=0
|
||||
while [ $i -lt 120 ] ; do
|
||||
containers=("customcert-bw-1" "customcert-bw-scheduler-1")
|
||||
healthy="true"
|
||||
for container in "${containers[@]}" ; do
|
||||
check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
|
||||
if [ "$check" = "" ] ; then
|
||||
healthy="false"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$healthy" = "true" ] ; then
|
||||
echo "🔏 Docker stack is healthy ✅"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
i=$((i+1))
|
||||
done
|
||||
if [ $i -ge 120 ] ; then
|
||||
docker compose logs
|
||||
echo "🔏 Docker stack is not healthy ❌"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start tests
|
||||
|
||||
docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "🔏 Test \"$test\" failed ❌"
|
||||
echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
|
||||
docker compose logs bw bw-scheduler
|
||||
exit 1
|
||||
else
|
||||
echo "🔏 Test \"$test\" succeeded ✅"
|
||||
fi
|
||||
|
||||
manual=1
|
||||
cleanup_stack
|
||||
manual=0
|
||||
|
||||
echo " "
|
||||
done
|
||||
|
||||
end=1
|
||||
echo "🔏 Tests are done ! ✅"
|
|
@ -0,0 +1,23 @@
|
|||
FROM python:3.11.3-alpine
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY requirements.txt .
|
||||
|
||||
RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
|
||||
rm -f requirements.txt
|
||||
|
||||
WORKDIR /opt/tests
|
||||
|
||||
RUN addgroup -g 101 nginx && \
|
||||
adduser -h /opt/tests -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx
|
||||
|
||||
COPY --chown=nginx:nginx main.py .
|
||||
ADD ./init/plugins external
|
||||
|
||||
RUN chown -R nginx:nginx external && \
|
||||
chmod -R 777 external
|
||||
|
||||
USER nginx:nginx
|
||||
|
||||
ENTRYPOINT [ "python3", "main.py" ]
|
|
@ -0,0 +1,9 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
init:
|
||||
build: init
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
volumes:
|
||||
- ./init/plugins:/plugins
|
|
@ -0,0 +1,42 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
tests:
|
||||
build: .
|
||||
volumes:
|
||||
- bw-data:/data/lib
|
||||
- bw-db:/opt/tests/db
|
||||
- bw-core-plugins:/opt/tests/core
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"
|
||||
GLOBAL_API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
GLOBAL_MULTISITE: "no"
|
||||
GLOBAL_HTTP_PORT: "80"
|
||||
GLOBAL_USE_BUNKERNET: "no"
|
||||
GLOBAL_USE_BLACKLIST: "no"
|
||||
GLOBAL_USE_REVERSE_PROXY: "yes"
|
||||
GLOBAL_REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
GLOBAL_REVERSE_PROXY_URL: "/"
|
||||
GLOBAL_LOG_LEVEL: "info"
|
||||
CUSTOM_CONF_MODSEC_test_custom_conf: 'SecRule REQUEST_FILENAME "@rx ^/db" "id:1,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog"'
|
||||
extra_hosts:
|
||||
- "bwadm.example.com:192.168.0.2"
|
||||
networks:
|
||||
bw-docker:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.3
|
||||
|
||||
volumes:
|
||||
bw-data:
|
||||
external: true
|
||||
bw-db:
|
||||
external: true
|
||||
bw-core-plugins:
|
||||
external: true
|
||||
|
||||
networks:
|
||||
bw-services:
|
||||
external: true
|
||||
bw-docker:
|
||||
external: true
|
|
@ -0,0 +1,112 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
bw:
|
||||
image: bunkerity/bunkerweb:1.5.0-beta
|
||||
pull_policy: never
|
||||
labels:
|
||||
- "bunkerweb.INSTANCE"
|
||||
environment:
|
||||
SERVER_NAME: "bwadm.example.com"
|
||||
API_WHITELIST_IP: "127.0.0.0/8 10.20.30.0/24"
|
||||
MULTISITE: "no"
|
||||
HTTP_PORT: "80"
|
||||
USE_BUNKERNET: "no"
|
||||
USE_BLACKLIST: "no"
|
||||
USE_REVERSE_PROXY: "yes"
|
||||
REVERSE_PROXY_HOST: "http://app1:8080"
|
||||
REVERSE_PROXY_URL: "/"
|
||||
LOG_LEVEL: "info"
|
||||
CUSTOM_CONF_MODSEC_test_custom_conf: 'SecRule REQUEST_FILENAME "@rx ^/db" "id:1,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog"'
|
||||
networks:
|
||||
bw-universe:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.2
|
||||
|
||||
bw-scheduler:
|
||||
image: bunkerity/bunkerweb-scheduler:1.5.0-beta
|
||||
pull_policy: never
|
||||
depends_on:
|
||||
- bw
|
||||
- bw-docker
|
||||
volumes:
|
||||
- bw-data:/data/lib
|
||||
- bw-db:/usr/share/bunkerweb/db
|
||||
- bw-core-plugins:/usr/share/bunkerweb/core
|
||||
- ./init/plugins:/data/plugins
|
||||
environment:
|
||||
DOCKER_HOST: "tcp://bw-docker:2375"
|
||||
LOG_LEVEL: "info"
|
||||
# ? DATABASE settings
|
||||
DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"
|
||||
networks:
|
||||
- bw-universe
|
||||
- bw-docker
|
||||
|
||||
bw-docker:
|
||||
image: tecnativa/docker-socket-proxy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
CONTAINERS: "1"
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
app1:
|
||||
image: nginxdemos/nginx-hello
|
||||
networks:
|
||||
bw-services:
|
||||
ipv4_address: 192.168.0.4
|
||||
|
||||
bw-maria-db:
|
||||
image: mariadb:10.10
|
||||
environment:
|
||||
- MYSQL_RANDOM_ROOT_PASSWORD=yes
|
||||
- MYSQL_DATABASE=db
|
||||
- MYSQL_USER=bunkerweb
|
||||
- MYSQL_PASSWORD=secret
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
bw-mysql-db:
|
||||
image: mysql:8.0
|
||||
environment:
|
||||
- MYSQL_RANDOM_ROOT_PASSWORD=yes
|
||||
- MYSQL_DATABASE=db
|
||||
- MYSQL_USER=bunkerweb
|
||||
- MYSQL_PASSWORD=secret
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
bw-postgres-db:
|
||||
image: postgres:15.1
|
||||
environment:
|
||||
- POSTGRES_USER=bunkerweb
|
||||
- POSTGRES_PASSWORD=secret
|
||||
- POSTGRES_DB=db
|
||||
networks:
|
||||
- bw-docker
|
||||
|
||||
volumes:
|
||||
bw-data:
|
||||
name: bw-data
|
||||
bw-db:
|
||||
name: bw-db
|
||||
bw-core-plugins:
|
||||
name: bw-core-plugins
|
||||
|
||||
networks:
|
||||
bw-universe:
|
||||
name: bw-universe
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 10.20.30.0/24
|
||||
bw-services:
|
||||
name: bw-services
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.0.0/24
|
||||
bw-docker:
|
||||
name: bw-docker
|
|
@ -0,0 +1,11 @@
|
|||
FROM alpine
|
||||
|
||||
RUN apk add --no-cache bash git
|
||||
|
||||
WORKDIR /opt/init
|
||||
|
||||
COPY entrypoint.sh .
|
||||
|
||||
RUN chmod +x entrypoint.sh
|
||||
|
||||
ENTRYPOINT [ "./entrypoint.sh" ]
|
|
@ -0,0 +1,17 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "ℹ️ Cloning BunkerWeb Plugins ..."
|
||||
|
||||
git clone https://github.com/bunkerity/bunkerweb-plugins.git
|
||||
|
||||
echo "ℹ️ Checking out to dev branch ..."
|
||||
|
||||
cd bunkerweb-plugins
|
||||
git checkout dev # TODO: remove this when the next release of bw-plugins is out
|
||||
|
||||
echo "ℹ️ Extracting ClamAV plugin ..."
|
||||
|
||||
cp -r clamav /plugins/
|
||||
|
||||
chown -R root:101 /plugins
|
||||
chmod -R 777 /plugins
|
|
@ -0,0 +1,984 @@
|
|||
from contextlib import contextmanager
|
||||
from glob import iglob
|
||||
from hashlib import sha512
|
||||
from json import dumps, load
|
||||
from os import environ, getenv
|
||||
from os.path import dirname, join
|
||||
from pathlib import Path
|
||||
from re import compile as re_compile
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.exc import (
|
||||
ArgumentError,
|
||||
DatabaseError,
|
||||
OperationalError,
|
||||
SQLAlchemyError,
|
||||
)
|
||||
from sqlalchemy.orm import scoped_session, sessionmaker
|
||||
from traceback import format_exc
|
||||
from time import sleep
|
||||
|
||||
from db.model import (
|
||||
Custom_configs,
|
||||
Global_values,
|
||||
Jobs,
|
||||
Metadata,
|
||||
Plugins,
|
||||
Plugin_pages,
|
||||
Services,
|
||||
Services_settings,
|
||||
Settings,
|
||||
)
|
||||
|
||||
try:
|
||||
database_uri = getenv("DATABASE_URI", "sqlite:////var/lib/bunkerweb/db.sqlite3")
|
||||
|
||||
if database_uri == "sqlite:////var/lib/bunkerweb/db.sqlite3":
|
||||
database_uri = "sqlite:////data/lib/db.sqlite3"
|
||||
|
||||
error = False
|
||||
|
||||
print(f"ℹ️ Connecting to database: {database_uri}", flush=True)
|
||||
|
||||
try:
|
||||
sql_engine = create_engine(
|
||||
database_uri,
|
||||
future=True,
|
||||
)
|
||||
except ArgumentError:
|
||||
print(f"❌ Invalid database URI: {database_uri}", flush=True)
|
||||
error = True
|
||||
except SQLAlchemyError:
|
||||
print(f"❌ Error when trying to create the engine: {format_exc()}", flush=True)
|
||||
error = True
|
||||
finally:
|
||||
if error:
|
||||
exit(1)
|
||||
|
||||
try:
|
||||
assert sql_engine is not None
|
||||
except AssertionError:
|
||||
print("❌ The database engine is not initialized", flush=True)
|
||||
exit(1)
|
||||
|
||||
not_connected = True
|
||||
retries = 15
|
||||
|
||||
while not_connected:
|
||||
try:
|
||||
with sql_engine.connect() as conn:
|
||||
conn.execute(text("CREATE TABLE IF NOT EXISTS test (id INT)"))
|
||||
conn.execute(text("DROP TABLE test"))
|
||||
not_connected = False
|
||||
except (OperationalError, DatabaseError) as e:
|
||||
if retries <= 0:
|
||||
print(f"❌ Can't connect to database : {format_exc()}", flush=True)
|
||||
exit(1)
|
||||
|
||||
if "attempt to write a readonly database" in str(e):
|
||||
print(
|
||||
"⚠️ The database is read-only, waiting for it to become writable. Retrying in 5 seconds ...",
|
||||
flush=True,
|
||||
)
|
||||
sql_engine.dispose(close=True)
|
||||
sql_engine = create_engine(
|
||||
database_uri,
|
||||
future=True,
|
||||
)
|
||||
if "Unknown table" in str(e):
|
||||
not_connected = False
|
||||
continue
|
||||
else:
|
||||
print(
|
||||
"⚠️ Can't connect to database, retrying in 5 seconds ...",
|
||||
flush=True,
|
||||
)
|
||||
retries -= 1
|
||||
sleep(5)
|
||||
except BaseException:
|
||||
print(
|
||||
f"❌ Error when trying to connect to the database: {format_exc()}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("ℹ️ Database connection established, launching tests ...", flush=True)
|
||||
|
||||
session = sessionmaker()
|
||||
sql_session = scoped_session(session)
|
||||
sql_session.remove()
|
||||
sql_session.configure(bind=sql_engine, autoflush=False, expire_on_commit=False)
|
||||
|
||||
@contextmanager
|
||||
def db_session():
|
||||
try:
|
||||
assert sql_session is not None
|
||||
except AssertionError:
|
||||
print("❌ The database session is not initialized", flush=True)
|
||||
exit(1)
|
||||
|
||||
session = sql_session()
|
||||
session.expire_on_commit = False
|
||||
|
||||
try:
|
||||
yield session
|
||||
except BaseException:
|
||||
session.rollback()
|
||||
raise
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
print("ℹ️ Checking if database is initialized ...", flush=True)
|
||||
|
||||
with db_session() as session:
|
||||
metadata = (
|
||||
session.query(Metadata)
|
||||
.with_entities(Metadata.is_initialized)
|
||||
.filter_by(id=1)
|
||||
.first()
|
||||
)
|
||||
|
||||
if metadata is None or not metadata.is_initialized:
|
||||
print(
|
||||
"❌ The database is not initialized, it should be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Database is initialized", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if service bwadm.example.com is in the database ...", flush=True)
|
||||
|
||||
with db_session() as session:
|
||||
services = session.query(Services).all()
|
||||
|
||||
if not services:
|
||||
print(
|
||||
"❌ The bw_services database table is empty, it shouldn't be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if services[0].id != "bwadm.example.com":
|
||||
print(
|
||||
"❌ The service bwadm.example.com is not in the database, it should be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Service bwadm.example.com is in the database", flush=True)
|
||||
print(" ", flush=True)
|
||||
print(
|
||||
"ℹ️ Checking if global values are in the database and are correct ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
global_settings = {}
|
||||
service_settings = {}
|
||||
multisite = getenv("GLOBAL_MULTISITE", "no") == "yes"
|
||||
for env in environ:
|
||||
if env.startswith("GLOBAL_"):
|
||||
if env == "GLOBAL_MULTISITE" and environ[env] == "no":
|
||||
continue
|
||||
global_settings[env[7:]] = {"value": environ[env], "checked": False}
|
||||
elif env.startswith("SERVICE_"):
|
||||
service_settings[env[8:]] = {"value": environ[env], "checked": False}
|
||||
|
||||
with db_session() as session:
|
||||
global_values = session.query(Global_values).all()
|
||||
|
||||
for global_value in global_values:
|
||||
if global_value.setting_id in global_settings:
|
||||
if (
|
||||
global_value.value
|
||||
!= global_settings[global_value.setting_id]["value"]
|
||||
):
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but is not correct, exiting ...\n{global_value.value} (database) != {global_settings[global_value.setting_id]['value']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif global_value.suffix != 0:
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but has the wrong suffix, exiting ...\n{global_value.suffix} (database) != 0 (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif global_value.method != "scheduler":
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but has the wrong method, exiting ...\n{global_value.method} (database) != scheduler (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
global_settings[global_value.setting_id]["checked"] = True
|
||||
else:
|
||||
print(
|
||||
f"❌ The global value {global_value.setting_id} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not all(
|
||||
[global_settings[global_value]["checked"] for global_value in global_settings]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all global values are in the database, exiting ...\nmissing values: {', '.join([global_value for global_value in global_settings if not global_settings[global_value]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Global values are in the database and are correct", flush=True)
|
||||
print(" ", flush=True)
|
||||
print(
|
||||
"ℹ️ Checking if service values are in the database and are correct ...",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
with db_session() as session:
|
||||
services_settings = session.query(Services_settings).all()
|
||||
|
||||
if not multisite and service_settings:
|
||||
print(
|
||||
'❌ The bw_services_settings database table is not empty, it should be when multisite is set to "no", exiting ...',
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
else:
|
||||
for service_setting in services_settings:
|
||||
if service_setting.setting_id in service_settings:
|
||||
if (
|
||||
service_setting.value
|
||||
!= service_settings[service_setting.setting_id]["value"]
|
||||
):
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but is not correct, exiting ...\n{service_setting.value} (database) != {service_settings[service_setting.setting_id]['value']} (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif service_setting.suffix != 0:
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but has the wrong suffix, exiting ...\n{service_setting.suffix} (database) != 0 (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
elif service_setting.method != "scheduler":
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but has the wrong method, exiting ...\n{service_setting.method} (database) != scheduler (env)",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
service_settings[service_setting.setting_id]["checked"] = True
|
||||
else:
|
||||
print(
|
||||
f"❌ The service value {service_setting.setting_id} is in the database but should not be, exiting ...",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not all(
|
||||
[
|
||||
service_settings[service_setting]["checked"]
|
||||
for service_setting in service_settings
|
||||
]
|
||||
):
|
||||
print(
|
||||
f"❌ Not all service values are in the database, exiting ...\nmissing values: {', '.join([service_setting for service_setting in service_settings if not service_settings[service_setting]['checked']])}",
|
||||
flush=True,
|
||||
)
|
||||
exit(1)
|
||||
|
||||
print("✅ Service values are correct", flush=True)
|
||||
print(" ", flush=True)
|
||||
print("ℹ️ Checking if the plugins are correct ...", flush=True)
|
||||
|
||||
core_plugins = {
|
||||
"general": {
|
||||
"order": 999,
|
||||
"name": "General",
|
||||
"description": "The general settings for the server",
|
||||
"version": "0.1",
|
||||
"stream": "partial",
|
||||
"external": False,
|
||||
"checked": False,
|
||||
"page_checked": True,
|
||||
"settings": {
|
||||
"IS_LOADING": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Internal use : set to yes when BW is loading.",
|
||||
"id": "internal-use",
|
||||
"label": "internal use",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"NGINX_PREFIX": {
|
||||
"context": "global",
|
||||
"default": "/etc/nginx/",
|
||||
"help": "Where nginx will search for configurations.",
|
||||
"id": "nginx-prefix",
|
||||
"label": "nginx prefix",
|
||||
"regex": "^(/[\\w. -]+)*/$",
|
||||
"type": "text",
|
||||
},
|
||||
"HTTP_PORT": {
|
||||
"context": "global",
|
||||
"default": "8080",
|
||||
"help": "HTTP port number which bunkerweb binds to.",
|
||||
"id": "http-port",
|
||||
"label": "HTTP port",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"HTTPS_PORT": {
|
||||
"context": "global",
|
||||
"default": "8443",
|
||||
"help": "HTTPS port number which bunkerweb binds to.",
|
||||
"id": "https-port",
|
||||
"label": "HTTPS port",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"MULTISITE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Multi site activation.",
|
||||
"id": "multisite",
|
||||
"label": "Multisite",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SERVER_NAME": {
|
||||
"context": "multisite",
|
||||
"default": "www.example.com",
|
||||
"help": "List of the virtual hosts served by bunkerweb.",
|
||||
"id": "server-name",
|
||||
"label": "Server name",
|
||||
"regex": "^(?! )( ?((?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?)(?!.* \\2))*$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_PROCESSES": {
|
||||
"context": "global",
|
||||
"default": "auto",
|
||||
"help": "Number of worker processes.",
|
||||
"id": "worker-processes",
|
||||
"label": "Worker processes",
|
||||
"regex": "^(auto|\\d+)$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_RLIMIT_NOFILE": {
|
||||
"context": "global",
|
||||
"default": "2048",
|
||||
"help": "Maximum number of open files for worker processes.",
|
||||
"id": "worker-rlimit-nofile",
|
||||
"label": "Open files per worker",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"WORKER_CONNECTIONS": {
|
||||
"context": "global",
|
||||
"default": "1024",
|
||||
"help": "Maximum number of connections per worker.",
|
||||
"id": "worker-connections",
|
||||
"label": "Connections per worker",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"LOG_FORMAT": {
|
||||
"context": "global",
|
||||
"default": '$host $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"',
|
||||
"help": "The format to use for access logs.",
|
||||
"id": "log-format",
|
||||
"label": "Log format",
|
||||
"regex": "^.*$",
|
||||
"type": "text",
|
||||
},
|
||||
"LOG_LEVEL": {
|
||||
"context": "global",
|
||||
"default": "notice",
|
||||
"help": "The level to use for error logs.",
|
||||
"id": "log-level",
|
||||
"label": "Log level",
|
||||
"regex": "^(debug|info|notice|warn|error|crit|alert|emerg)$",
|
||||
"type": "select",
|
||||
"select": [
|
||||
"debug",
|
||||
"info",
|
||||
"notice",
|
||||
"warn",
|
||||
"error",
|
||||
"crit",
|
||||
"alert",
|
||||
"emerg",
|
||||
],
|
||||
},
|
||||
"DNS_RESOLVERS": {
|
||||
"context": "global",
|
||||
"default": "127.0.0.11",
|
||||
"help": "DNS addresses of resolvers to use.",
|
||||
"id": "dns-resolvers",
|
||||
"label": "DNS resolvers",
|
||||
"regex": "^(?! )( *(((\\b25[0-5]|\\b2[0-4]\\d|\\b[01]?\\d\\d?)(\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)){3})(\\/([1-2][0-9]?|3[0-2]?|[04-9]))?|(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]Z{0,4}){0,4}%[0-9a-zA-Z]+|::(ffff(:0{1,4})?:)?((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d))(\\/(12[0-8]|1[01][0-9]|[0-9][0-9]?))?)(?!.*\\D\\2([^\\d\\/]|$)) *)*$",
|
||||
"type": "text",
|
||||
},
|
||||
"DATASTORE_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "64m",
|
||||
"help": "Size of the internal datastore.",
|
||||
"id": "datastore-memory-size",
|
||||
"label": "Datastore memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "64m",
|
||||
"help": "Size of the internal cachestore.",
|
||||
"id": "cachestore-memory-size",
|
||||
"label": "Cachestore memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_IPC_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (ipc).",
|
||||
"id": "cachestore-ipc-memory-size",
|
||||
"label": "Cachestore ipc memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_MISS_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (miss).",
|
||||
"id": "cachestore-miss-memory-size",
|
||||
"label": "Cachestore miss memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"CACHESTORE_LOCKS_MEMORY_SIZE": {
|
||||
"context": "global",
|
||||
"default": "16m",
|
||||
"help": "Size of the internal cachestore (locks).",
|
||||
"id": "cachestore-locks-memory-size",
|
||||
"label": "Cachestore locks memory size",
|
||||
"regex": "^\\d+[kKmMgG]?$",
|
||||
"type": "text",
|
||||
},
|
||||
"USE_API": {
|
||||
"context": "global",
|
||||
"default": "yes",
|
||||
"help": "Activate the API to control BunkerWeb.",
|
||||
"id": "use-api",
|
||||
"label": "Activate API",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"API_HTTP_PORT": {
|
||||
"context": "global",
|
||||
"default": "5000",
|
||||
"help": "Listen port number for the API.",
|
||||
"id": "api-http-listen",
|
||||
"label": "API port number",
|
||||
"regex": "^\\d+$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_LISTEN_IP": {
|
||||
"context": "global",
|
||||
"default": "0.0.0.0",
|
||||
"help": "Listen IP address for the API.",
|
||||
"id": "api-ip-listen",
|
||||
"label": "API listen IP",
|
||||
"regex": "^.*$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_SERVER_NAME": {
|
||||
"context": "global",
|
||||
"default": "bwapi",
|
||||
"help": "Server name (virtual host) for the API.",
|
||||
"id": "api-server-name",
|
||||
"label": "API server name",
|
||||
"regex": "^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?$",
|
||||
"type": "text",
|
||||
},
|
||||
"API_WHITELIST_IP": {
|
||||
"context": "global",
|
||||
"default": "127.0.0.0/8",
|
||||
"help": "List of IP/network allowed to contact the API.",
|
||||
"id": "api-whitelist-ip",
|
||||
"label": "API whitelist IP",
|
||||
"regex": "^(?! )( *(((\\b25[0-5]|\\b2[0-4]\\d|\\b[01]?\\d\\d?)(\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)){3})(\\/([1-2][0-9]?|3[0-2]?|[04-9]))?|(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]Z{0,4}){0,4}%[0-9a-zA-Z]+|::(ffff(:0{1,4})?:)?((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d))(\\/(12[0-8]|1[01][0-9]|[0-9][0-9]?))?)(?!.*\\D\\2([^\\d\\/]|$)) *)*$",
|
||||
"type": "text",
|
||||
},
|
||||
"AUTOCONF_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Autoconf Docker integration.",
|
||||
"id": "autoconf-mode",
|
||||
"label": "Autoconf mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SWARM_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Docker Swarm integration.",
|
||||
"id": "swarm-mode",
|
||||
"label": "Swarm mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"KUBERNETES_MODE": {
|
||||
"context": "global",
|
||||
"default": "no",
|
||||
"help": "Enable Kubernetes integration.",
|
||||
"id": "kubernetes-mode",
|
||||
"label": "Kubernetes mode",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"SERVER_TYPE": {
|
||||
"context": "multisite",
|
||||
"default": "http",
|
||||
"help": "Server type : http or stream.",
|
||||
"id": "server-type",
|
||||
"label": "Server type",
|
||||
"regex": "^(http|stream)$",
|
||||
"type": "select",
|
||||
"select": ["http", "stream"],
|
||||
},
|
||||
"LISTEN_STREAM": {
|
||||
"context": "multisite",
|
||||
"default": "yes",
|
||||
"help": "Enable listening for non-ssl (passthrough).",
|
||||
"id": "listen-stream",
|
||||
"label": "Listen stream",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
"LISTEN_STREAM_PORT": {
|
||||
"context": "multisite",
|
||||
"default": "1337",
|
||||
"help": "Listening port for non-ssl (passthrough).",
|
||||
"id": "listen-stream-port",
|
||||
"label": "Listen stream port",
|
||||
"regex": "^[0-9]+$",
|
||||
"type": "text",
|
||||
},
|
||||
"LISTEN_STREAM_PORT_SSL": {
|
||||
"context": "multisite",
|
||||
"default": "4242",
|
||||
"help": "Listening port for ssl (passthrough).",
|
||||
"id": "listen-stream-port-ssl",
|
||||
"label": "Listen stream port ssl",
|
||||
"regex": "^[0-9]+$",
|
||||
"type": "text",
|
||||
},
|
||||
"USE_UDP": {
|
||||
"context": "multisite",
|
||||
"default": "no",
|
||||
"help": "UDP listen instead of TCP (stream).",
|
||||
"id": "use-udp",
|
||||
"label": "Listen UDP",
|
||||
"regex": "^(yes|no)$",
|
||||
"type": "check",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
for filename in iglob(join("core", "*", "plugin.json")):
|
||||
with open(filename, "r") as f:
|
||||
data = load(f)
|
||||
data["checked"] = False
|
||||
for x, job in enumerate(data.get("jobs", [])):
|
||||
data["jobs"][x]["checked"] = False
|
||||
data["page_checked"] = not Path(f"{dirname(filename)}/ui").exists() or False
|
||||
core_plugins[data.pop("id")] = data
|
||||
|
||||
external_plugins = {}
|
||||
for filename in iglob(join("external", "*", "plugin.json")):
|
||||
with open(filename, "r") as f:
|
||||
data = load(f)
|
||||
data["checked"] = False
|
||||
for x, job in enumerate(data.get("jobs", [])):
|
||||
data["jobs"][x]["checked"] = False
|
||||
data["page_checked"] = not Path(f"{dirname(filename)}/ui").exists() or False
|
||||
external_plugins[data.pop("id")] = data

    with db_session() as session:
        plugins = (
            session.query(Plugins)
            .with_entities(
                Plugins.id,
                Plugins.order,
                Plugins.name,
                Plugins.description,
                Plugins.version,
                Plugins.stream,
                Plugins.external,
                Plugins.method,
            )
            .all()
        )

        for plugin in plugins:
            if not plugin.external and plugin.id in core_plugins:
                current_plugin = core_plugins
            elif plugin.external and plugin.id in external_plugins:
                current_plugin = external_plugins
            else:
                print(
                    f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but should not be, exiting ...",
                    flush=True,
                )
                exit(1)

            if (
                plugin.order != current_plugin[plugin.id]["order"]
                or plugin.name != current_plugin[plugin.id]["name"]
                or plugin.description != current_plugin[plugin.id]["description"]
                or plugin.version != current_plugin[plugin.id]["version"]
                or plugin.stream != current_plugin[plugin.id]["stream"]
            ):
                print(
                    f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but is not correct, exiting ...\n{dumps({'order': plugin.order, 'name': plugin.name, 'description': plugin.description, 'version': plugin.version, 'stream': plugin.stream})} (database) != {dumps({'order': current_plugin[plugin.id]['order'], 'name': current_plugin[plugin.id]['name'], 'description': current_plugin[plugin.id]['description'], 'version': current_plugin[plugin.id]['version'], 'stream': current_plugin[plugin.id]['stream']})} (file)",
                    flush=True,
                )
                exit(1)
            else:
                settings = session.query(Settings).filter_by(plugin_id=plugin.id).all()

                for setting in settings:
                    if (
                        setting.name
                        != current_plugin[plugin.id]["settings"][setting.id]["id"]
                        or setting.context
                        != current_plugin[plugin.id]["settings"][setting.id]["context"]
                        or setting.default
                        != current_plugin[plugin.id]["settings"][setting.id]["default"]
                        or setting.help
                        != current_plugin[plugin.id]["settings"][setting.id]["help"]
                        or setting.label
                        != current_plugin[plugin.id]["settings"][setting.id]["label"]
                        or setting.regex
                        != current_plugin[plugin.id]["settings"][setting.id]["regex"]
                        or setting.type
                        != current_plugin[plugin.id]["settings"][setting.id]["type"]
                        or setting.multiple
                        != current_plugin[plugin.id]["settings"][setting.id].get(
                            "multiple", None
                        )
                    ):
                        print(
                            f"❌ The {'external' if plugin.external else 'core'} plugin {plugin.name} (id: {plugin.id}) is in the database but is not correct, exiting ...\n{dumps({'default': setting.default, 'help': setting.help, 'label': setting.label, 'regex': setting.regex, 'type': setting.type})} (database) != {dumps({'default': current_plugin[plugin.id]['settings'][setting.id]['default'], 'help': current_plugin[plugin.id]['settings'][setting.id]['help'], 'label': current_plugin[plugin.id]['settings'][setting.id]['label'], 'regex': current_plugin[plugin.id]['settings'][setting.id]['regex'], 'type': current_plugin[plugin.id]['settings'][setting.id]['type']})} (file)",
                            flush=True,
                        )
                        exit(1)

            current_plugin[plugin.id]["checked"] = True

    if not all([core_plugins[plugin]["checked"] for plugin in core_plugins]):
        print(
            f"❌ Not all core plugins are in the database, exiting ...\nmissing plugins: {', '.join([plugin for plugin in core_plugins if not core_plugins[plugin]['checked']])}",
            flush=True,
        )
        exit(1)
    elif not all([external_plugins[plugin]["checked"] for plugin in external_plugins]):
        print(
            f"❌ Not all external plugins are in the database, exiting ...\nmissing plugins: {', '.join([plugin for plugin in external_plugins if not external_plugins[plugin]['checked']])}",
            flush=True,
        )
        exit(1)

    print("✅ The ClamAV plugin and all core plugins are in the database", flush=True)
    print(" ", flush=True)
    print("ℹ️ Checking if the jobs are in the database ...", flush=True)

    with db_session() as session:
        jobs = session.query(Jobs).all()

        for job in jobs:
            if not job.success:
                print(
                    f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but failed, exiting ...",
                    flush=True,
                )
                exit(1)

            if job.plugin_id in core_plugins:
                current_plugin = core_plugins
            elif job.plugin_id in external_plugins:
                current_plugin = external_plugins
            else:
                print(
                    f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but should not be, exiting ...",
                    flush=True,
                )
                exit(1)
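            # Locate the job with the same name in the plugin.json data so the
            # database row can be compared field by field (file, every, reload).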

            index = next(
                index
                for (index, d) in enumerate(
                    current_plugin[job.plugin_id].get("jobs", [])
                )
                if d["name"] == job.name
            )
            core_job = current_plugin[job.plugin_id]["jobs"][index]

            if (
                job.name != core_job["name"]
                or job.file_name != core_job["file"]
                or job.every != core_job["every"]
                or job.reload != core_job["reload"]
            ):
                print(
                    f"❌ The job {job.name} (plugin_id: {job.plugin_id}) is in the database but is not correct, exiting ...\n{dumps({'name': job.name, 'file': job.file_name, 'every': job.every, 'reload': job.reload})} (database) != {dumps({'name': core_job['name'], 'file': core_job['file'], 'every': core_job['every'], 'reload': core_job['reload']})} (file)",
                    flush=True,
                )
                exit(1)

            current_plugin[job.plugin_id]["jobs"][index]["checked"] = True

    if not all(
        [
            all([job["checked"] for job in core_plugins[plugin].get("jobs", [])])
            for plugin in core_plugins
        ]
    ):
        print(
            f"❌ Not all jobs from core plugins are in the database, exiting ...\nmissing jobs: {dumps({plugin: [job['name'] for job in core_plugins[plugin]['jobs'] if not job['checked']] for plugin in core_plugins})}",
            flush=True,
        )
        exit(1)
    elif not all(
        [
            all([job["checked"] for job in external_plugins[plugin].get("jobs", [])])
            for plugin in external_plugins
        ]
    ):
        print(
            f"❌ Not all jobs from external plugins are in the database, exiting ...\nmissing jobs: {dumps({plugin: [job['name'] for job in external_plugins[plugin]['jobs'] if not job['checked']] for plugin in external_plugins})}",
            flush=True,
        )
        exit(1)

    print("✅ All jobs are in the database and have run successfully", flush=True)
    print(" ", flush=True)
    print("ℹ️ Checking if all plugin pages are in the database ...", flush=True)

    def file_hash(file: str) -> str:
        _sha512 = sha512()
        with open(file, "rb") as f:
            while True:
                data = f.read(1024)
                if not data:
                    break
                _sha512.update(data)
        return _sha512.hexdigest()
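    # file_hash() streams the file in 1 KiB chunks and returns its SHA-512 hex
    # digest; it is used below to recompute the checksums that are expected to
    # match the template_checksum / actions_checksum columns of Plugin_pages.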

    with db_session() as session:
        plugin_pages = (
            session.query(Plugin_pages)
            .with_entities(
                Plugin_pages.id,
                Plugin_pages.plugin_id,
                Plugin_pages.template_checksum,
                Plugin_pages.actions_checksum,
            )
            .all()
        )

        for plugin_page in plugin_pages:
            if plugin_page.plugin_id in core_plugins:
                current_plugin = core_plugins
            elif plugin_page.plugin_id in external_plugins:
                current_plugin = external_plugins
            else:
                print(
                    f"❌ The plugin page from {plugin_page.plugin_id} is in the database but should not be, exiting ...",
                    flush=True,
                )
                exit(1)
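            # The page may belong to a core or an external plugin: prefer the
            # core/<id>/ui folder if it exists, otherwise fall back to
            # external/<id>/ui.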

            path_ui = (
                Path(join("core", plugin_page.plugin_id, "ui"))
                if Path(join("core", plugin_page.plugin_id, "ui")).exists()
                else Path(join("external", plugin_page.plugin_id, "ui"))
            )

            if not path_ui.exists():
                print(
                    f'❌ The plugin page from {plugin_page.plugin_id} is in the database but should not be because the "ui" folder is missing from the plugin, exiting ...',
                    flush=True,
                )
                exit(1)

            template_checksum = file_hash(f"{path_ui}/template.html")
            actions_checksum = file_hash(f"{path_ui}/actions.py")

            if plugin_page.template_checksum != template_checksum:
                print(
                    f"❌ The plugin page from {plugin_page.plugin_id} is in the database but the template file checksum differs, exiting ...\n{plugin_page.template_checksum} (database) != {template_checksum} (file)",
                    flush=True,
                )
                exit(1)
            elif plugin_page.actions_checksum != actions_checksum:
                print(
                    f"❌ The plugin page from {plugin_page.plugin_id} is in the database but the actions file checksum differs, exiting ...\n{plugin_page.actions_checksum} (database) != {actions_checksum} (file)",
                    flush=True,
                )
                exit(1)

            current_plugin[plugin_page.plugin_id]["page_checked"] = True

    if not all([core_plugins[plugin]["page_checked"] for plugin in core_plugins]):
        print(
            f"❌ Not all core plugin pages are in the database, exiting ...\nmissing plugin pages: {', '.join([plugin for plugin in core_plugins if not core_plugins[plugin]['page_checked']])}",
            flush=True,
        )
        exit(1)
    elif not all(
        [external_plugins[plugin]["page_checked"] for plugin in external_plugins]
    ):
        print(
            f"❌ Not all external plugin pages are in the database, exiting ...\nmissing plugin pages: {', '.join([plugin for plugin in external_plugins if not external_plugins[plugin]['page_checked']])}",
            flush=True,
        )
        exit(1)

    print("✅ All plugin pages are in the database and have the right value", flush=True)
    print(" ", flush=True)
    print("ℹ️ Checking if all custom configs are in the database ...", flush=True)

    custom_confs_rx = re_compile(
        r"^([0-9a-z\.-]*)_?CUSTOM_CONF_(SERVICE_)?(HTTP|SERVER_STREAM|STREAM|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
    )
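    # The regex captures four groups: an optional service prefix, an optional
    # "SERVICE_" marker, the config type and the config name. For example the
    # variable CUSTOM_CONF_SERVICE_MODSEC_CRS_test_service_conf used by
    # docker-compose.test.yml is parsed as a service-scoped MODSEC_CRS config
    # named "test_service_conf".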

    global_custom_configs = {}
    service_custom_configs = {}
    for env in environ:
        if not custom_confs_rx.match(env):
            continue

        custom_conf = custom_confs_rx.search(env).groups()
        if custom_conf[1]:
            service_custom_configs[custom_conf[3]] = {
                "value": environ[env].encode(),
                "type": custom_conf[2].lower(),
                "method": "scheduler",
                "checked": False,
            }
            continue

        global_custom_configs[custom_conf[3]] = {
            "value": environ[env].encode(),
            "type": custom_conf[2].lower(),
            "method": "scheduler",
            "checked": False,
        }

    with db_session() as session:
        custom_configs = (
            session.query(Custom_configs)
            .with_entities(
                Custom_configs.service_id,
                Custom_configs.type,
                Custom_configs.name,
                Custom_configs.data,
                Custom_configs.method,
            )
            .all()
        )
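        # Depending on MULTISITE, a custom config read from the environment must
        # either stay global (no service_id) or be attached to the
        # bwadm.example.com service; both directions are enforced below before
        # the type, value and method are compared.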

        for custom_config in custom_configs:
            if (
                not multisite
                and custom_config.name in global_custom_configs
                and custom_config.service_id
            ):
                print(
                    f"❌ The custom config {custom_config.name} is in the database but should not be owned by the service {custom_config.service_id} because multisite is not enabled, exiting ...",
                    flush=True,
                )
                exit(1)
            elif (
                multisite
                and custom_config.name in service_custom_configs
                and not custom_config.service_id
            ):
                print(
                    f"❌ The custom config {custom_config.name} is in the database but should be owned by the service bwadm.example.com because it's a service config, exiting ...",
                    flush=True,
                )
                exit(1)

            if custom_config.name in global_custom_configs:
                current_custom_configs = global_custom_configs
            elif custom_config.name in service_custom_configs:
                current_custom_configs = service_custom_configs
            else:
                print(
                    f"❌ The custom config {custom_config.name} is in the database but should not be, exiting ...",
                    flush=True,
                )
                exit(1)

            if custom_config.type != current_custom_configs[custom_config.name]["type"]:
                print(
                    f"❌ The custom config {custom_config.name} is in the database but the type differs, exiting ...\n{custom_config.type} (database) != {current_custom_configs[custom_config.name]['type']} (env)",
                    flush=True,
                )
                exit(1)
            elif (
                custom_config.data
                != current_custom_configs[custom_config.name]["value"]
            ):
                print(
                    f"❌ The custom config {custom_config.name} is in the database but the value differs, exiting ...\n{custom_config.data} (database) != {current_custom_configs[custom_config.name]['value']} (env)",
                    flush=True,
                )
                exit(1)
            elif (
                custom_config.method
                != current_custom_configs[custom_config.name]["method"]
            ):
                print(
                    f"❌ The custom config {custom_config.name} is in the database but the method differs, exiting ...\n{custom_config.method} (database) != {current_custom_configs[custom_config.name]['method']} (env)",
                    flush=True,
                )
                exit(1)

            current_custom_configs[custom_config.name]["checked"] = True

    if not all(
        [
            global_custom_configs[custom_config]["checked"]
            for custom_config in global_custom_configs
        ]
    ):
        print(
            f"❌ Not all global custom configs are in the database, exiting ...\nmissing custom configs: {', '.join([custom_config for custom_config in global_custom_configs if not global_custom_configs[custom_config]['checked']])}",
            flush=True,
        )
        exit(1)
    elif not all(
        [
            service_custom_configs[custom_config]["checked"]
            for custom_config in service_custom_configs
        ]
    ):
        print(
            f"❌ Not all service custom configs are in the database, exiting ...\nmissing custom configs: {', '.join([custom_config for custom_config in service_custom_configs if not service_custom_configs[custom_config]['checked']])}",
            flush=True,
        )
        exit(1)

    print(
        "✅ All custom configs are in the database and have the right value", flush=True
    )
except SystemExit:
    exit(1)
except:
    print(f"❌ Something went wrong, exiting ...\n{format_exc()}", flush=True)
    exit(1)
@ -0,0 +1,4 @@
sqlalchemy==2.0.13
psycopg2-binary==2.9.6
PyMySQL==1.0.3
cryptography==40.0.2
@ -0,0 +1,168 @@
#!/bin/bash

echo "💾 Building db stack ..."

# Starting stack
docker compose pull bw-docker app1 bw-maria-db bw-mysql-db bw-postgres-db
if [ $? -ne 0 ] ; then
    echo "💾 Pull failed ❌"
    exit 1
fi

manual=0
end=0
cleanup_stack () {
    exit_code=$?
    if [[ $end -eq 1 || $exit_code = 1 ]] || [[ $end -eq 0 && $exit_code = 0 ]] && [ $manual = 0 ] ; then
        rm -rf init/plugins
        find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "sqlite:////var/lib/bunkerweb/db.sqlite3"@' {} \;
        find . -type f -name 'docker-compose.*' -exec sed -i 's@MULTISITE: "yes"$@MULTISITE: "no"@' {} \;
        sed -i 's@bwadm.example.com_USE_REVERSE_PROXY@USE_REVERSE_PROXY@' docker-compose.yml
        sed -i 's@bwadm.example.com_REVERSE_PROXY_HOST@REVERSE_PROXY_HOST@' docker-compose.yml
        sed -i 's@bwadm.example.com_REVERSE_PROXY_URL@REVERSE_PROXY_URL@' docker-compose.yml
        sed -i 's@SERVICE_USE_REVERSE_PROXY@GLOBAL_USE_REVERSE_PROXY@' docker-compose.test.yml
        sed -i 's@SERVICE_REVERSE_PROXY_HOST@GLOBAL_REVERSE_PROXY_HOST@' docker-compose.test.yml
        sed -i 's@SERVICE_REVERSE_PROXY_URL@GLOBAL_REVERSE_PROXY_URL@' docker-compose.test.yml

        if [[ $(sed '20!d' docker-compose.yml) = ' bwadm.example.com_SERVER_NAME: "bwadm.example.com"' ]] ; then
            sed -i '20d' docker-compose.yml
        fi

        if [[ $(sed '24!d' docker-compose.yml) = " bwadm.example.com_CUSTOM_CONF_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" ]] ; then
            sed -i '24d' docker-compose.yml
        fi

        if [[ $(sed '18!d' docker-compose.test.yml) = ' SERVICE_SERVER_NAME: "bwadm.example.com"' ]] ; then
            sed -i '18d' docker-compose.test.yml
        fi

        if [[ $(sed '23!d' docker-compose.test.yml) = " CUSTOM_CONF_SERVICE_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" ]] ; then
            sed -i '23d' docker-compose.test.yml
        fi

        if [[ $end -eq 1 && $exit_code = 0 ]] ; then
            return
        fi
    fi

    echo "💾 Cleaning up current stack ..."

    docker compose down -v --remove-orphans 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "💾 Down failed ❌"
        exit 1
    fi

    echo "💾 Cleaning up current stack done ✅"
}

# Cleanup stack on exit
trap cleanup_stack EXIT
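# cleanup_stack runs on every exit through the EXIT trap: it reverts the
# docker-compose files to their SQLite / non-multisite defaults and then tears
# the stack down; when it is called manually between two rounds (manual=1) only
# the teardown part runs.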

echo "💾 Initializing workspace ..."
rm -rf init/plugins
mkdir -p init/plugins
docker compose -f docker-compose.init.yml up --build
if [ $? -ne 0 ] ; then
    echo "💾 Build failed ❌"
    exit 1
elif ! [[ -d "init/plugins/clamav" ]]; then
    echo "💾 ClamAV plugin not found ❌"
    exit 1
fi

docker compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
    echo "💾 Build failed ❌"
    exit 1
fi
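# Five rounds are run against the same stack: "local" keeps the default SQLite
# database, "multisite" switches MULTISITE to yes and adds per-service settings,
# and the mariadb / mysql / postgres rounds rewrite DATABASE_URI to point at the
# matching database container before replaying the tests.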

for test in "local" "multisite" "mariadb" "mysql" "postgres"
do
    if [ "$test" = "local" ] ; then
        echo "💾 Running tests with a local database ..."
    elif [ "$test" = "multisite" ] ; then
        echo "💾 Running tests with MULTISITE set to yes and with multisite settings ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@MULTISITE: "no"$@MULTISITE: "yes"@' {} \;
        sed -i '20i \ bwadm.example.com_SERVER_NAME: "bwadm.example.com"' docker-compose.yml
        sed -i "25i \ bwadm.example.com_CUSTOM_CONF_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" docker-compose.yml
        sed -i 's@USE_REVERSE_PROXY@bwadm.example.com_USE_REVERSE_PROXY@' docker-compose.yml
        sed -i 's@REVERSE_PROXY_HOST@bwadm.example.com_REVERSE_PROXY_HOST@' docker-compose.yml
        sed -i 's@REVERSE_PROXY_URL@bwadm.example.com_REVERSE_PROXY_URL@' docker-compose.yml
        sed -i '18i \ SERVICE_SERVER_NAME: "bwadm.example.com"' docker-compose.test.yml
        sed -i "24i \ CUSTOM_CONF_SERVICE_MODSEC_CRS_test_service_conf: 'SecRule REQUEST_FILENAME \"@rx ^/test\" \"id:2,ctl:ruleRemoveByTag=attack-generic,ctl:ruleRemoveByTag=attack-protocol,nolog\"'" docker-compose.test.yml
        sed -i 's@GLOBAL_USE_REVERSE_PROXY@SERVICE_USE_REVERSE_PROXY@' docker-compose.test.yml
        sed -i 's@GLOBAL_REVERSE_PROXY_HOST@SERVICE_REVERSE_PROXY_HOST@' docker-compose.test.yml
        sed -i 's@GLOBAL_REVERSE_PROXY_URL@SERVICE_REVERSE_PROXY_URL@' docker-compose.test.yml
    elif [ "$test" = "mariadb" ] ; then
        echo "💾 Running tests with MariaDB database ..."
        echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "mariadb+pymysql://bunkerweb:secret\@bw-maria-db:3306/db"@' {} \;
    elif [ "$test" = "mysql" ] ; then
        echo "💾 Running tests with MySQL database ..."
        echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "mysql+pymysql://bunkerweb:secret\@bw-mysql-db:3306/db"@' {} \;
    elif [ "$test" = "postgres" ] ; then
        echo "💾 Running tests with PostgreSQL database ..."
        echo "ℹ️ Keeping the MULTISITE variable to yes and multisite settings ..."
        find . -type f -name 'docker-compose.*' -exec sed -i 's@DATABASE_URI: ".*"$@DATABASE_URI: "postgresql://bunkerweb:secret\@bw-postgres-db:5432/db"@' {} \;
    fi

    echo "💾 Starting stack ..."
    docker compose up -d 2>/dev/null
    if [ $? -ne 0 ] ; then
        echo "💾 Up failed ❌"
        exit 1
    fi

    # Check if stack is healthy
    echo "💾 Waiting for stack to be healthy ..."
    i=0
    while [ $i -lt 120 ] ; do
        containers=("db-bw-1" "db-bw-scheduler-1")
        healthy="true"
        for container in "${containers[@]}" ; do
            check="$(docker inspect --format "{{json .State.Health }}" $container | grep "healthy")"
            if [ "$check" = "" ] ; then
                healthy="false"
                break
            fi
        done
        if [ "$healthy" = "true" ] ; then
            echo "💾 Docker stack is healthy ✅"
            break
        fi
        sleep 1
        i=$((i+1))
    done
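    # If the loop above exhausted its 120 one-second polls the stack never
    # became healthy, so dump the logs and abort the whole run.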
    if [ $i -ge 120 ] ; then
        docker compose logs
        echo "💾 Docker stack is not healthy ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    fi

    # Start tests

    docker compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from tests 2>/dev/null

    if [ $? -ne 0 ] ; then
        echo "💾 Test \"$test\" failed ❌"
        echo "🛡️ Showing BunkerWeb and BunkerWeb Scheduler logs ..."
        docker compose logs bw bw-scheduler
        exit 1
    else
        echo "💾 Test \"$test\" succeeded ✅"
    fi

    manual=1
    cleanup_stack
    manual=0

    echo " "
done

end=1
echo "💾 Tests are done! ✅"
@ -0,0 +1,14 @@
FROM python:3.11.3-alpine

WORKDIR /tmp

COPY requirements.txt .

RUN MAKEFLAGS="-j $(nproc)" pip install --no-cache -r requirements.txt && \
    rm -f requirements.txt

WORKDIR /opt/tests

COPY main.py .

ENTRYPOINT [ "python3", "main.py" ]
|
@ -0,0 +1,9 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
init:
|
||||
build: init
|
||||
environment:
|
||||
PYTHONUNBUFFERED: "1"
|
||||
volumes:
|
||||
- ./init/output:/output
|
|
@ -0,0 +1,18 @@
version: "3.5"

services:
  tests:
    build: .
    environment:
      PYTHONUNBUFFERED: "1"
      USE_DNSBL: "yes"
      DNSBL_LIST: "bl.blocklist.de problems.dnsbl.sorbs.net"
    extra_hosts:
      - "www.example.com:192.168.0.2"
    networks:
      bw-services:
        ipv4_address: 192.168.0.3

networks:
  bw-services:
    external: true
Some files were not shown because too many files have changed in this diff.