Merge branch 'dev' into ui

This commit is contained in:
florian 2023-05-06 18:38:24 +02:00
commit 48d7e72e54
No known key found for this signature in database
GPG Key ID: 3D80806F12602A7C
83 changed files with 1306 additions and 1071 deletions

View File

@ -7,405 +7,294 @@ on:
branches: [beta]
jobs:
# Build BW amd64 + i386 images
build-bw-amd64:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/amd64
IMAGE: bunkerweb
DOCKERFILE: src/bw/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-bw-386:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/386
IMAGE: bunkerweb
DOCKERFILE: src/bw/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Build SC amd64 + i386 images
build-sc-amd64:
# Build amd64 + 386 containers images
build-containers:
strategy:
matrix:
image: [bunkerweb, scheduler, autoconf, ui]
arch: [linux/amd64, linux/386]
include:
- release: beta
cache: false
push: false
- image: bunkerweb
dockerfile: src/bw/Dockerfile
- image: scheduler
dockerfile: src/scheduler/Dockerfile
- image: autoconf
dockerfile: src/autoconf/Dockerfile
- image: ui
dockerfile: src/ui/Dockerfile
- arch: linux/amd64
cache_suffix: amd64
- arch: linux/386
cache_suffix: "386"
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/amd64
IMAGE: scheduler
DOCKERFILE: src/scheduler/Dockerfile
CACHE: false
PUSH: false
RELEASE: ${{ matrix.release }}
ARCH: ${{ matrix.arch }}
IMAGE: ${{ matrix.image }}
DOCKERFILE: ${{ matrix.dockerfile }}
CACHE: ${{ matrix.cache }}
PUSH: ${{ matrix.push }}
CACHE_SUFFIX: ${{ matrix.cache_suffix }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-sc-386:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/386
IMAGE: scheduler
DOCKERFILE: src/scheduler/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Build AU amd64 + i386 images
build-au-amd64:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/amd64
IMAGE: autoconf
DOCKERFILE: src/autoconf/Dockerfile
CACHE: false
PUSH: false
# Create ARM environment
create-arm:
uses: ./.github/workflows/create-arm.yml
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-au-386:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/386
IMAGE: autoconf
DOCKERFILE: src/autoconf/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Build UI amd64 + i386 images
build-ui-amd64:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/amd64
IMAGE: ui
DOCKERFILE: src/ui/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-ui-386:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/386
IMAGE: ui
DOCKERFILE: src/ui/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
SCW_ACCESS_KEY: ${{ secrets.SCW_ACCESS_KEY }}
SCW_SECRET_KEY: ${{ secrets.SCW_SECRET_KEY }}
SCW_DEFAULT_PROJECT_ID: ${{ secrets.SCW_DEFAULT_PROJECT_ID }}
SCW_DEFAULT_ORGANIZATION_ID: ${{ secrets.SCW_DEFAULT_ORGANIZATION_ID }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
# Build arm64 + arm/v7 images
build-bw-arm:
build-containers-arm:
needs: [create-arm]
strategy:
matrix:
image: [bunkerweb, scheduler, autoconf, ui]
arch: ["linux/arm64,linux/arm/v7"]
include:
- release: beta
cache: false
push: false
cache_suffix: arm
- image: bunkerweb
dockerfile: src/bw/Dockerfile
- image: scheduler
dockerfile: src/scheduler/Dockerfile
- image: autoconf
dockerfile: src/autoconf/Dockerfile
- image: ui
dockerfile: src/ui/Dockerfile
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/arm64,linux/arm/v7
IMAGE: bunkerweb
DOCKERFILE: src/bw/Dockerfile
CACHE: false
PUSH: false
RELEASE: ${{ matrix.release }}
ARCH: ${{ matrix.arch }}
IMAGE: ${{ matrix.image }}
DOCKERFILE: ${{ matrix.dockerfile }}
CACHE: ${{ matrix.cache }}
PUSH: ${{ matrix.push }}
CACHE_SUFFIX: ${{ matrix.cache_suffix }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-sc-arm:
needs: ["build-bw-arm"]
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/arm64,linux/arm/v7
IMAGE: scheduler
DOCKERFILE: src/scheduler/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-au-arm:
needs: ["build-sc-arm"]
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/arm64,linux/arm/v7
IMAGE: autoconf
DOCKERFILE: src/autoconf/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-ui-arm:
needs: ["build-au-arm"]
uses: ./.github/workflows/container-build.yml
with:
RELEASE: beta
ARCH: linux/arm64,linux/arm/v7
IMAGE: ui
DOCKERFILE: src/ui/Dockerfile
CACHE: false
PUSH: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_IP: ${{ needs.create-arm.outputs.ip }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
# Linux packages
build-ubuntu:
# Build Linux packages
build-packages:
needs: [create-arm]
strategy:
matrix:
linux: [ubuntu, debian, fedora, rhel]
platforms: [linux/amd64, linux/arm64, linux/arm/v7]
include:
- release: beta
- linux: ubuntu
package: deb
- linux: debian
package: deb
- linux: fedora
package: rpm
- linux: rhel
package: rpm
exclude:
- linux: fedora
platforms: linux/arm/v7
- linux: rhel
platforms: linux/arm/v7
uses: ./.github/workflows/linux-build.yml
with:
RELEASE: beta
LINUX: ubuntu
PACKAGE: deb
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-debian:
uses: ./.github/workflows/linux-build.yml
with:
RELEASE: beta
LINUX: debian
PACKAGE: deb
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-centos:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: beta
# LINUX: centos
# PACKAGE: rpm
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-fedora:
uses: ./.github/workflows/linux-build.yml
with:
RELEASE: beta
LINUX: fedora
PACKAGE: rpm
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-rhel:
uses: ./.github/workflows/linux-build.yml
with:
RELEASE: beta
LINUX: rhel
PACKAGE: rpm
RELEASE: ${{ matrix.release }}
LINUX: ${{ matrix.linux }}
PACKAGE: ${{ matrix.package }}
TEST: false
PLATFORMS: ${{ matrix.platforms }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_IP: ${{ needs.create-arm.outputs.ip }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
# Wait for all builds and extract VERSION
wait-builds:
needs: [
build-bw-amd64, build-bw-386,
build-sc-amd64, build-sc-386,
build-au-amd64, build-au-386,
build-ui-amd64, build-ui-386,
build-ui-arm,
build-ubuntu,
build-debian,
build-fedora,
build-rhel
]
runs-on: ubuntu-latest
needs: [build-containers, build-containers-arm, build-packages]
outputs:
version: ${{ steps.getversion.outputs.version }}
versionrpm: ${{ steps.getversionrpm.outputs.versionrpm }}
steps:
- name: Checkout source code
uses: actions/checkout@v3
- name: Get VERSION
id: getversion
run: echo "::set-output name=version::$(cat src/VERSION | tr -d '\n')"
run: echo "version=$(cat src/VERSION | tr -d '\n')" >> "$GITHUB_OUTPUT"
- name: Get VERSION (for RPM based)
id: getversionrpm
run: echo "versionrpm=$(cat src/VERSION | tr -d '\n' | sed 's/-/_/g')" >> "$GITHUB_OUTPUT"
# Push Docker images
push-bunkerweb:
needs: [wait-builds]
push-images:
needs: [create-arm, wait-builds]
strategy:
matrix:
image: [bunkerweb, bunkerweb-scheduler, bunkerweb-autoconf, bunkerweb-ui]
include:
- release: beta
- image: bunkerweb
cache_from: bunkerweb
dockerfile: src/bw/Dockerfile
- image: bunkerweb-scheduler
cache_from: scheduler
dockerfile: src/scheduler/Dockerfile
- image: bunkerweb-autoconf
cache_from: autoconf
dockerfile: src/autoconf/Dockerfile
- image: bunkerweb-ui
cache_from: ui
dockerfile: src/ui/Dockerfile
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb:beta,bunkerity/bunkerweb:${{ jobs.wait-builds.steps.getversion.outputs.version }}
CACHE_FROM: bunkerweb-beta
DOCKERFILE: src/bw/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
push-scheduler:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-scheduler:beta,bunkerity/bunkerweb-scheduler:${{ jobs.wait-builds.steps.getversion.outputs.version }}
CACHE_FROM: scheduler-beta
DOCKERFILE: src/scheduler/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
push-autoconf:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-autoconf:beta,bunkerity/bunkerweb-autoconf:${{ jobs.wait-builds.steps.getversion.outputs.version }}
CACHE_FROM: autoconf-beta
DOCKERFILE: src/autoconf/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
push-ui:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-ui:beta,bunkerity/bunkerweb-ui:${{ jobs.wait-builds.steps.getversion.outputs.version }}
CACHE_FROM: ui-beta
DOCKERFILE: src/ui/Dockerfile
IMAGE: bunkerity/${{ matrix.image }}:${{ matrix.release }},bunkerity/${{ matrix.image }}:${{ needs.wait-builds.outputs.version }}
CACHE_FROM: ${{ matrix.cache_from }}-${{ matrix.release }}
DOCKERFILE: ${{ matrix.dockerfile }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
ARM_SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
ARM_SSH_IP: ${{ needs.create-arm.outputs.ip }}
ARM_SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
# Push Linux packages
push-ubuntu:
push-packages:
needs: [wait-builds]
strategy:
matrix:
linux: [ubuntu, debian, fedora, el]
arch: [amd64, arm64, armv7]
include:
- release: beta
repo: bunkerweb
- linux: ubuntu
separator: _
suffix: ""
version: jammy
package: deb
- linux: debian
separator: _
suffix: ""
version: bullseye
package: deb
- linux: fedora
separator: "-"
suffix: "1."
version: 38
package: rpm
- linux: el
separator: "-"
suffix: "1."
version: 8
package: rpm
- linux: ubuntu
arch: amd64
package_arch: amd64
- linux: debian
arch: amd64
package_arch: amd64
- linux: fedora
arch: amd64
package_arch: x86_64
- linux: el
arch: amd64
package_arch: x86_64
- linux: ubuntu
arch: arm64
package_arch: arm64
- linux: debian
arch: arm64
package_arch: arm64
- linux: fedora
arch: arm64
package_arch: aarch64
- linux: el
arch: arm64
package_arch: aarch64
- linux: ubuntu
arch: armv7
package_arch: armhf
- linux: debian
arch: armv7
package_arch: armhf
exclude:
- linux: fedora
arch: armv7
- linux: el
arch: armv7
uses: ./.github/workflows/push-packagecloud.yml
with:
SEPARATOR: _
SUFFIX: amd64
REPO: bunkerweb
LINUX: ubuntu
VERSION: jammy
PACKAGE: deb
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-debian:
needs: [wait-builds]
uses: ./.github/workflows/push-packagecloud.yml
with:
SEPARATOR: _
SUFFIX: amd64
REPO: bunkerweb
LINUX: debian
VERSION: bullseye
PACKAGE: deb
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-rhel:
needs: [wait-builds]
uses: ./.github/workflows/staging-push-packagecloud.yml
with:
SEPARATOR: "-"
SUFFIX: 1.x86_64
REPO: bunkerweb
LINUX: el
VERSION: 8
PACKAGE: rpm
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-fedora:
needs: [wait-builds]
uses: ./.github/workflows/push-packagecloud.yml
with:
SEPARATOR: "-"
SUFFIX: 1.x86_64
REPO: bunkerweb
LINUX: fedora
VERSION: 37
PACKAGE: rpm
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
SEPARATOR: ${{ matrix.separator }}
SUFFIX: ${{ matrix.suffix }}
REPO: ${{ matrix.repo }}
LINUX: ${{ matrix.linux }}
VERSION: ${{ matrix.version }}
PACKAGE: ${{ matrix.package }}
ARCH: ${{ matrix.arch }}
BW_VERSION: ${{ matrix.package == 'rpm' && needs.wait-builds.outputs.versionrpm || needs.wait-builds.outputs.version }}
PACKAGE_ARCH: ${{ matrix.package_arch }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
# Create doc PDF
doc-pdf:
needs: [
push-bunkerweb,
push-scheduler,
push-autoconf,
push-ui,
push-ubuntu,
push-debian,
push-rhel,
push-fedora
]
needs: [wait-builds, push-images, push-packages]
uses: ./.github/workflows/doc-to-pdf.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
# Push on GH
push-gh:
needs: [doc-pdf]
needs: [wait-builds, doc-pdf]
permissions:
contents: write
discussions: write
uses: ./.github/workflows/push-github.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
PRERELEASE: true
# Push doc
push-doc:
needs: [publish-gh]
needs: [wait-builds, push-gh]
permissions:
contents: write
uses: ./.github/workflows/push-doc.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
ALIAS: beta
secrets:
BUNKERBOT_TOKEN: ${{ secrets.BUNKERBOT_TOKEN }}
# Remove ARM VM
rm-arm:
if: ${{ always() }}
needs: [create-arm, push-images, build-packages]
uses: ./.github/workflows/rm-arm.yml
secrets:
ARM_ID: ${{ needs.create-arm.outputs.id }}
SCW_ACCESS_KEY: ${{ secrets.SCW_ACCESS_KEY }}
SCW_SECRET_KEY: ${{ secrets.SCW_SECRET_KEY }}
SCW_DEFAULT_PROJECT_ID: ${{ secrets.SCW_DEFAULT_PROJECT_ID }}
SCW_DEFAULT_ORGANIZATION_ID: ${{ secrets.SCW_DEFAULT_ORGANIZATION_ID }}

View File

@ -23,6 +23,10 @@ on:
required: false
type: boolean
default: true
CACHE_SUFFIX:
required: false
type: string
default: ""
secrets:
DOCKER_USERNAME:
required: true
@ -32,6 +36,12 @@ on:
required: false
PRIVATE_REGISTRY_TOKEN:
required: false
ARM_SSH_KEY:
required: false
ARM_SSH_IP:
required: false
ARM_SSH_CONFIG:
required: false
jobs:
build:
@ -40,8 +50,26 @@ jobs:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
- name: Setup SSH for ARM node
if: inputs.CACHE_SUFFIX == 'arm'
run: |
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa_arm
chmod 600 ~/.ssh/id_rsa_arm
echo "$SSH_CONFIG" | sed "s/SSH_IP/$SSH_IP/g" > ~/.ssh/config
env:
SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
SSH_IP: ${{ secrets.ARM_SSH_IP }}
SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
- name: Setup Buildx
uses: docker/setup-buildx-action@v2
if: inputs.CACHE_SUFFIX != 'arm'
- name: Setup Buildx (ARM)
uses: docker/setup-buildx-action@v2
if: inputs.CACHE_SUFFIX == 'arm'
with:
endpoint: ssh://root@arm
platforms: linux/arm64,linux/arm/v7,linux/arm/v6
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
@ -74,14 +102,16 @@ jobs:
context: .
file: ${{ inputs.DOCKERFILE }}
platforms: ${{ inputs.ARCH }}
load: true
load: ${{ inputs.CACHE_SUFFIX != 'arm' }}
tags: local/${{ inputs.IMAGE }}
cache-to: type=registry,ref=bunkerity/cache:${{ inputs.IMAGE }}-${{ inputs.RELEASE }}-${{ inputs.ARCH }},mode=min
cache-to: type=registry,ref=bunkerity/cache:${{ inputs.IMAGE }}-${{ inputs.RELEASE }}-${{ inputs.CACHE_SUFFIX }},mode=min
# Check OS vulnerabilities
- name: Check OS vulnerabilities
if: ${{ inputs.CACHE_SUFFIX != 'arm' }}
uses: aquasecurity/trivy-action@master
with:
vuln-type: os
skip-dirs: /root/.cargo
image-ref: local/${{ inputs.IMAGE }}
format: table
exit-code: 1

75
.github/workflows/create-arm.yml vendored Normal file
View File

@ -0,0 +1,75 @@
name: Create ARM node (REUSABLE)
on:
workflow_call:
outputs:
id:
description: "ARM ID"
value: ${{ jobs.build.outputs.id }}
ip:
description: "ARM IP"
value: ${{ jobs.build.outputs.ip }}
secrets:
SCW_ACCESS_KEY:
required: true
SCW_SECRET_KEY:
required: true
SCW_DEFAULT_PROJECT_ID:
required: true
SCW_DEFAULT_ORGANIZATION_ID:
required: true
ARM_SSH_KEY:
required: true
ARM_SSH_CONFIG:
required: true
jobs:
build:
runs-on: ubuntu-latest
outputs:
json: ${{ steps.scw.outputs.json }}
id: ${{ steps.getinfo.outputs.id }}
ip: ${{ steps.getinfo.outputs.ip }}
steps:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
- name: Create ARM VM
id: scw
uses: scaleway/action-scw@bbcfd65cd2af73456ce439088e0d42c1657c4c38
with:
args: instance server create zone=fr-par-2 type=AMP2-C60 root-volume=block:50GB
save-config: true
version: v2.13.0
access-key: ${{ secrets.SCW_ACCESS_KEY }}
secret-key: ${{ secrets.SCW_SECRET_KEY }}
default-project-id: ${{ secrets.SCW_DEFAULT_PROJECT_ID }}
default-organization-id: ${{ secrets.SCW_DEFAULT_ORGANIZATION_ID }}
- name: Get info
id: getinfo
run: |
echo "id=${{ fromJson(steps.scw.outputs.json).id }}" >> "$GITHUB_OUTPUT"
echo "ip=${{ fromJson(steps.scw.outputs.json).public_ip.address }}" >> "$GITHUB_OUTPUT"
- name: Wait for VM
run: scw instance server wait ${{ fromJson(steps.scw.outputs.json).id }} zone=fr-par-2
- name: Wait for SSH
uses: iFaxity/wait-on-action@628831cec646e6dacca502f34a6c6b46e131e51d
with:
resource: tcp:${{ fromJson(steps.scw.outputs.json).public_ip.address }}:22
timeout: 300000
- name: Setup SSH for ARM node
run: |
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa_arm
chmod 600 ~/.ssh/id_rsa_arm
echo "$SSH_CONFIG" | sed "s/SSH_IP/$SSH_IP/g" > ~/.ssh/config
env:
SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
SSH_IP: ${{ fromJson(steps.scw.outputs.json).public_ip.address }}
SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
- name: Install Docker
run: ssh root@$SSH_IP "curl -fsSL https://test.docker.com -o test-docker.sh ; sh test-docker.sh"
env:
SSH_IP: ${{ fromJson(steps.scw.outputs.json).public_ip.address }}

View File

@ -4,7 +4,6 @@ permissions:
contents: write
on:
workflow_dispatch:
schedule:
- cron: "0 1 5 * *"
@ -12,22 +11,51 @@ jobs:
mmdb-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
- name: Checkout source code
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.BUNKERBOT_TOKEN }}
ref: dev
- name: Download mmdb files
run: |
mkdir -p src/bw/misc/
cd src/bw/misc/
curl -s -o asn.mmdb.gz https://download.db-ip.com/free/dbip-asn-lite-$(date +%Y-%m).mmdb.gz
curl -s -o country.mmdb.gz https://download.db-ip.com/free/dbip-country-lite-$(date +%Y-%m).mmdb.gz
CURL_RETURN_CODE=0
CURL_OUTPUT=`curl -w httpcode=%{http_code} -s -o asn.mmdb.gz https://download.db-ip.com/free/dbip-asn-lite-$(date +%Y-%m).mmdb.gz 2> /dev/null` || CURL_RETURN_CODE=$?
if [ ${CURL_RETURN_CODE} -ne 0 ]; then
echo "Curl connection failed when downloading asn-lite mmdb file with return code - ${CURL_RETURN_CODE}"
exit 1
else
echo "Curl connection success"
# Check http code for curl operation/response in CURL_OUTPUT
httpCode=$(echo "${CURL_OUTPUT}" | sed -e 's/.*httpcode=//')
if [ ${httpCode} -ne 200 ]; then
echo "Curl operation/command failed due to server return code - ${httpCode}"
exit 1
fi
fi
CURL_RETURN_CODE=0
CURL_OUTPUT=`curl -w httpcode=%{http_code} -s -o country.mmdb.gz https://download.db-ip.com/free/dbip-country-lite-$(date +%Y-%m).mmdb.gz 2> /dev/null` || CURL_RETURN_CODE=$?
if [ ${CURL_RETURN_CODE} -ne 0 ]; then
echo "Curl connection failed when downloading country-lite mmdb file with return code - ${CURL_RETURN_CODE}"
exit 1
else
echo "Curl connection success"
# Check http code for curl operation/response in CURL_OUTPUT
httpCode=$(echo "${CURL_OUTPUT}" | sed -e 's/.*httpcode=//')
if [ ${httpCode} -ne 200 ]; then
echo "Curl operation/command failed due to server return code - ${httpCode}"
exit 1
fi
fi
rm -f asn.mmdb country.mmdb
gunzip asn.mmdb.gz
gunzip country.mmdb.gz
gunzip asn.mmdb.gz country.mmdb.gz
- name: Commit and push changes
uses: stefanzweifel/git-auto-commit-action@v4
with:
branch: dev
commit_message: "Monthly mmdb update"
commit_options: "--no-verify"
commit_user_name: "GitHub Actions"
commit_user_email: "tdiot@bunkerity.com"
commit_user_name: "BunkerBot"
commit_user_email: "bunkerbot@bunkerity.com"

View File

@ -9,49 +9,25 @@ on:
jobs:
# Containers
build-bw:
build-containers:
strategy:
matrix:
image: [bunkerweb, scheduler, autoconf, ui]
include:
- image: bunkerweb
dockerfile: src/bw/Dockerfile
- image: scheduler
dockerfile: src/scheduler/Dockerfile
- image: autoconf
dockerfile: src/autoconf/Dockerfile
- image: ui
dockerfile: src/ui/Dockerfile
uses: ./.github/workflows/container-build.yml
with:
RELEASE: dev
ARCH: linux/amd64
IMAGE: bunkerweb
DOCKERFILE: src/bw/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-sc:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: dev
ARCH: linux/amd64
IMAGE: scheduler
DOCKERFILE: src/scheduler/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-au:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: dev
ARCH: linux/amd64
IMAGE: autoconf
DOCKERFILE: src/autoconf/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-ui:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: dev
ARCH: linux/amd64
IMAGE: ui
DOCKERFILE: src/ui/Dockerfile
IMAGE: ${{ matrix.image }}
DOCKERFILE: ${{ matrix.dockerfile }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
@ -84,7 +60,7 @@ jobs:
# UI tests
tests-ui:
needs: [code-security, build-bw, build-sc, build-ui]
needs: [code-security, build-containers]
uses: ./.github/workflows/tests-ui.yml
with:
RELEASE: dev

View File

@ -21,7 +21,7 @@ jobs:
- name: Install doc requirements
run: pip install -r docs/requirements.txt
- name: Install chromium
run: apt install chromium-browser
run: sudo apt install chromium-browser
- name: Install node
uses: actions/setup-node@v3
with:

View File

@ -12,6 +12,20 @@ on:
PACKAGE:
required: true
type: string
PLATFORMS:
required: true
type: string
TEST:
required: false
type: boolean
default: false
secrets:
ARM_SSH_KEY:
required: false
ARM_SSH_IP:
required: false
ARM_SSH_CONFIG:
required: false
DOCKER_USERNAME:
required: true
@ -29,8 +43,31 @@ jobs:
# Prepare
- name: Checkout source code
uses: actions/checkout@v3
- name: Extract arch
run: |
echo "ARCH=${{ env.PLATFORMS }}" | sed 's/linux//g' | sed 's@/@@g' >> "$GITHUB_ENV"
env:
PLATFORMS: ${{ inputs.PLATFORMS }}
- name: Setup SSH for ARM node
if: startsWith(env.ARCH, 'arm') == true
run: |
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa_arm
chmod 600 ~/.ssh/id_rsa_arm
echo "$SSH_CONFIG" | sed "s/SSH_IP/$SSH_IP/g" > ~/.ssh/config
env:
SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
SSH_IP: ${{ secrets.ARM_SSH_IP }}
SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
- name: Setup Buildx
uses: docker/setup-buildx-action@v2
if: startsWith(env.ARCH, 'arm') == false
- name: Setup Buildx (ARM)
uses: docker/setup-buildx-action@v2
if: startsWith(env.ARCH, 'arm') == true
with:
endpoint: ssh://root@arm
platforms: linux/arm64,linux/arm/v7,linux/arm/v6
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
@ -50,7 +87,7 @@ jobs:
context: .
load: true
file: src/linux/Dockerfile-${{ inputs.LINUX }}
platforms: linux/amd64
platforms: ${{ inputs.PLATFORMS }}
tags: local/bunkerweb-${{ inputs.LINUX }}:latest
cache-from: type=registry,ref=bunkerity/cache:${{ inputs.LINUX }}-staging
cache-to: type=registry,ref=bunkerity/cache:${{ inputs.LINUX }}-staging,mode=min
@ -62,21 +99,22 @@ jobs:
context: .
load: true
file: src/linux/Dockerfile-${{ inputs.LINUX }}
platforms: linux/amd64
platforms: ${{ inputs.PLATFORMS }}
tags: local/bunkerweb-${{ inputs.LINUX }}:latest
# Generate package
- name: Generate package
run: ./src/linux/package.sh ${{ inputs.LINUX }}
- uses: actions/upload-artifact@v3
with:
name: package-${{ inputs.LINUX }}
name: package-${{ inputs.LINUX }}-${{ env.ARCH }}
path: package-${{ inputs.LINUX }}/*.${{ inputs.PACKAGE }}
# Build test image
- name: Build test image
if: inputs.TEST == true
uses: docker/build-push-action@v3
with:
context: .
file: tests/linux/Dockerfile-${{ inputs.LINUX }}
platforms: linux/amd64
platforms: ${{ inputs.PLATFORMS }}
push: true
tags: ${{ secrets.PRIVATE_REGISTRY }}/infra/${{ inputs.LINUX }}-tests:${{ inputs.RELEASE }}

View File

@ -9,6 +9,9 @@ on:
ALIAS:
required: true
type: string
secrets:
BUNKERBOT_TOKEN:
required: true
jobs:
build:
@ -16,7 +19,14 @@ jobs:
steps:
- name: Checkout source code
uses: actions/checkout@v3
uses: actions/setup-python@v4
with:
fetch-depth: 0
token: ${{ secrets.BUNKERBOT_TOKEN }}
- name: Setup git user
run: |
git config --global user.name "BunkerBot"
git config --global user.email "bunkerbot@bunkerity.com"
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install doc requirements

View File

@ -21,20 +21,16 @@ on:
required: true
ARM_SSH_CONFIG:
required: true
ARM_SSH_IP:
required: true
jobs:
push:
runs-on: ubuntu-latest
steps:
# Prepare
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Setup Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Check out repository code
uses: actions/checkout@v3
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
@ -45,13 +41,16 @@ jobs:
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa_arm
chmod 600 ~/.ssh/id_rsa_arm
echo "$SSH_CONFIG" > ~/.ssh/config
echo "$SSH_CONFIG" | sed "s/SSH_IP/$SSH_IP/g" > ~/.ssh/config
env:
SSH_KEY: ${{ secrets.ARM_SSH_KEY }}
SSH_IP: ${{ secrets.ARM_SSH_IP }}
SSH_CONFIG: ${{ secrets.ARM_SSH_CONFIG }}
- name: Append ARM node to buildx
run: |
docker buildx create --append --name ${{ steps.buildx.outputs.name }} --node arm --platform linux/arm64,linux/arm/v7,linux/arm/v6 ssh://ubuntu@arm
- name: Setup Buildx (ARM)
uses: docker/setup-buildx-action@v2
with:
endpoint: ssh://root@arm
platforms: linux/arm64,linux/arm/v7,linux/arm/v6
# Build and push
- name: Build and push
uses: docker/build-push-action@v3

View File

@ -21,17 +21,22 @@ jobs:
uses: actions/download-artifact@v3
with:
name: BunkerWeb_documentation_v${{ inputs.VERSION }}.pdf
path: BunkerWeb_documentation_v${{ inputs.VERSION }}.pdf
# Create tag
- uses: rickstaa/action-create-tag@v1
name: Create tag
with:
tag: "v${{ inputs.VERSION }}"
message: "v${{ inputs.VERSION }}"
force_push_tag: true
# Extract changelog
- name: Extract changelog
id: getchangelog
run: echo "::set-output name=content::$(awk -v n=2 '/##/{n--}; n > 0' CHANGELOG.md | grep -v '# Changelog' | grep -v '##' | sed '/^$/d')"
run: |
content=$(awk -v n=2 '/##/{n--}; n > 0' CHANGELOG.md | grep -v '# Changelog' | grep -v '##' | sed '/^$/d')
content="${content//'%'/'%25'}"
content="${content//$'\n'/'%0A'}"
content="${content//$'\r'/'%0D'}"
echo "::set-output name=content::$content"
# Create release
- name: Create release
uses: softprops/action-gh-release@v1
@ -48,10 +53,11 @@ jobs:
Linux packages : https://packagecloud.io/app/bunkerity/bunkerweb/search?q=${{ inputs.VERSION }}&filter=all&dist=
Changelog :
${{steps.getchangelog.outputs.content}}
draft: false
${{ steps.getchangelog.outputs.content }}
draft: true
prerelease: ${{ inputs.PRERELEASE }}
name: v${{ inputs.VERSION }}
tag_name: v${{ inputs.VERSION }}
discussion_category_name: Announcements
files: BunkerWeb_documentation_v${{ inputs.VERSION }}.pdf

View File

@ -24,6 +24,12 @@ on:
BW_VERSION:
required: true
type: string
ARCH:
required: true
type: string
PACKAGE_ARCH:
required: true
type: string
secrets:
PACKAGECLOUD_TOKEN:
required: true
@ -43,12 +49,18 @@ jobs:
run: gem install package_cloud
# Download packages
- uses: actions/download-artifact@v3
if: inputs.LINUX != 'el'
with:
name: package-${{ inputs.LINUX }}
name: package-${{ inputs.LINUX }}-${{ inputs.ARCH }}
path: /tmp/${{ inputs.LINUX }}
- uses: actions/download-artifact@v3
if: inputs.LINUX == 'el'
with:
name: package-rhel-${{ inputs.ARCH }}
path: /tmp/${{ inputs.LINUX }}
# Remove existing packages
- name: Remove existing package
run: package_cloud yank bunkerity/${{ inputs.REPO }}/${{ inputs.LINUX }}/${{ inputs.VERSION }} bunkerweb${{ inputs.SEPARATOR }}${{ inputs.BW_VERSION }}${{ inputs.SEPARATOR }}${{ inputs.SUFFIX }}.${{ inputs.PACKAGE }}
run: package_cloud yank bunkerity/${{ inputs.REPO }}/${{ inputs.LINUX }}/${{ inputs.VERSION }} bunkerweb${{ inputs.SEPARATOR }}${{ inputs.BW_VERSION }}${{ inputs.SEPARATOR }}${{ inputs.SUFFIX }}${{ inputs.PACKAGE_ARCH }}.${{ inputs.PACKAGE }}
continue-on-error: true
env:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
@ -56,7 +68,7 @@ jobs:
- name: Push package to packagecloud
uses: danielmundi/upload-packagecloud@v1
with:
PACKAGE-NAME: /tmp/${{ inputs.LINUX }}/bunkerweb${{ inputs.SEPARATOR }}${{ inputs.BW_VERSION }}${{ inputs.SEPARATOR }}${{ inputs.SUFFIX }}.${{ inputs.PACKAGE }}
PACKAGE-NAME: /tmp/${{ inputs.LINUX }}/*.${{ inputs.PACKAGE }}
PACKAGECLOUD-USERNAME: bunkerity
PACKAGECLOUD-REPO: ${{ inputs.REPO }}
PACKAGECLOUD-DISTRIB: ${{ inputs.LINUX }}/${{ inputs.VERSION }}

View File

@ -223,6 +223,7 @@ jobs:
# Wait for all builds and extract VERSION
wait-builds:
runs-on: ubuntu-latest
needs: [
build-bw-amd64, build-bw-386,
build-sc-amd64, build-sc-386,
@ -234,19 +235,21 @@ jobs:
build-fedora,
build-rhel
]
outputs:
version: ${{ steps.getversion.outputs.version }}
steps:
- name: Checkout source code
uses: actions/checkout@v3
- name: Get VERSION
id: getversion
run: echo "::set-output name=version::$(cat src/VERSION | tr -d '\n')"
run: echo "version=$(cat src/VERSION | tr -d '\n')" >> "$GITHUB_OUTPUT"
# Push Docker images
push-bunkerweb:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb:latest,bunkerity/bunkerweb:${{ jobs.wait-builds.steps.getversion.outputs.version }}
IMAGE: bunkerity/bunkerweb:latest,bunkerity/bunkerweb:${{ needs.wait-builds.outputs.version }}
CACHE_FROM: bunkerweb-latest
DOCKERFILE: src/bw/Dockerfile
secrets:
@ -258,7 +261,7 @@ jobs:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-scheduler:latest,bunkerity/bunkerweb-scheduler:${{ jobs.wait-builds.steps.getversion.outputs.version }}
IMAGE: bunkerity/bunkerweb-scheduler:latest,bunkerity/bunkerweb-scheduler:${{ needs.wait-builds.outputs.version }}
CACHE_FROM: scheduler-latest
DOCKERFILE: src/scheduler/Dockerfile
secrets:
@ -270,7 +273,7 @@ jobs:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-autoconf:latest,bunkerity/bunkerweb-autoconf:${{ jobs.wait-builds.steps.getversion.outputs.version }}
IMAGE: bunkerity/bunkerweb-autoconf:latest,bunkerity/bunkerweb-autoconf:${{ needs.wait-builds.outputs.version }}
CACHE_FROM: autoconf-latest
DOCKERFILE: src/autoconf/Dockerfile
secrets:
@ -282,7 +285,7 @@ jobs:
needs: [wait-builds]
uses: ./.github/workflows/push-docker.yml
with:
IMAGE: bunkerity/bunkerweb-ui:latest,bunkerity/bunkerweb-ui:${{ jobs.wait-builds.steps.getversion.outputs.version }}
IMAGE: bunkerity/bunkerweb-ui:latest,bunkerity/bunkerweb-ui:${{ needs.wait-builds.outputs.version }}
CACHE_FROM: ui-latest
DOCKERFILE: src/ui/Dockerfile
secrets:
@ -302,7 +305,7 @@ jobs:
LINUX: ubuntu
VERSION: jammy
PACKAGE: deb
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
BW_VERSION: ${{ needs.wait-builds.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-debian:
@ -315,12 +318,12 @@ jobs:
LINUX: debian
VERSION: bullseye
PACKAGE: deb
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
BW_VERSION: ${{ needs.wait-builds.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-rhel:
needs: [wait-builds]
uses: ./.github/workflows/staging-push-packagecloud.yml
uses: ./.github/workflows/push-packagecloud.yml
with:
SEPARATOR: "-"
SUFFIX: 1.x86_64
@ -328,7 +331,7 @@ jobs:
LINUX: el
VERSION: 8
PACKAGE: rpm
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
BW_VERSION: ${{ needs.wait-builds.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
push-fedora:
@ -341,13 +344,14 @@ jobs:
LINUX: fedora
VERSION: 37
PACKAGE: rpm
BW_VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
BW_VERSION: ${{ needs.wait-builds.outputs.version }}
secrets:
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
# Create doc PDF
doc-pdf:
needs: [
wait-builds,
push-bunkerweb,
push-scheduler,
push-autoconf,
@ -359,26 +363,26 @@ jobs:
]
uses: ./.github/workflows/doc-to-pdf.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
# Push on GH
push-gh:
needs: [doc-pdf]
needs: [wait-builds, doc-pdf]
permissions:
contents: write
uses: ./.github/workflows/push-github.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
PRERELEASE: false
# Push doc
push-doc:
needs: [publish-gh]
needs: [wait-builds, push-gh]
permissions:
contents: write
uses: ./.github/workflows/push-doc.yml
with:
VERSION: ${{ jobs.wait-builds.steps.getversion.outputs.version }}
VERSION: ${{ needs.wait-builds.outputs.version }}
ALIAS: latest
# Update Docker Hub description
@ -398,9 +402,6 @@ jobs:
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Extract changelog
id: getchangelog
run: echo "::set-output name=content::$(awk -v n=2 '/##/{n--}; n > 0' CHANGELOG.md | grep -v '# Changelog' | grep -v '##' | sed '/^$/d')"
- name: Update Docker Hub description for BW
uses: peter-evans/dockerhub-description@v3
with:

33
.github/workflows/rm-arm.yml vendored Normal file
View File

@ -0,0 +1,33 @@
# Reusable workflow that DELETES the temporary ARM build VM on Scaleway.
# It is called by parent workflows once the ARM builds are finished (or failed),
# so the paid instance never outlives the pipeline.
name: Delete ARM node (REUSABLE)
on:
  workflow_call:
    secrets:
      SCW_ACCESS_KEY:
        required: true
      SCW_SECRET_KEY:
        required: true
      SCW_DEFAULT_PROJECT_ID:
        required: true
      SCW_DEFAULT_ORGANIZATION_ID:
        required: true
      # ID of the Scaleway instance to delete (produced by the create workflow)
      ARM_ID:
        required: true
jobs:
  rm:
    # Always run, even when earlier jobs in the caller failed,
    # so the VM is cleaned up in every case
    if: ${{ always() }}
    runs-on: ubuntu-latest
    steps:
      # Prepare
      - name: Checkout source code
        uses: actions/checkout@v3
      # Delete the instance together with its flexible IP and attached volumes;
      # force-shutdown avoids hanging on a busy/unresponsive VM
      - name: Delete ARM VM
        uses: scaleway/action-scw@bbcfd65cd2af73456ce439088e0d42c1657c4c38
        with:
          args: instance server delete ${{ secrets.ARM_ID }} zone=fr-par-2 with-ip=true with-volumes=all force-shutdown=true
          version: v2.13.0
          access-key: ${{ secrets.SCW_ACCESS_KEY }}
          secret-key: ${{ secrets.SCW_SECRET_KEY }}
          default-project-id: ${{ secrets.SCW_DEFAULT_PROJECT_ID }}
          default-organization-id: ${{ secrets.SCW_DEFAULT_ORGANIZATION_ID }}

View File

@ -7,112 +7,61 @@ on:
branches: [staging]
jobs:
# Containers
build-bw:
# Build Docker images
build-containers:
strategy:
matrix:
image: [bunkerweb, scheduler, autoconf, ui]
include:
- image: bunkerweb
dockerfile: src/bw/Dockerfile
- image: scheduler
dockerfile: src/scheduler/Dockerfile
- image: autoconf
dockerfile: src/autoconf/Dockerfile
- image: ui
dockerfile: src/ui/Dockerfile
uses: ./.github/workflows/container-build.yml
with:
RELEASE: staging
ARCH: linux/amd64
IMAGE: bunkerweb
DOCKERFILE: src/bw/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-scheduler:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: staging
ARCH: linux/amd64
IMAGE: scheduler
DOCKERFILE: src/scheduler/Dockerfile
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-autoconf:
# uses: ./.github/workflows/container-build.yml
# with:
# RELEASE: staging
# ARCH: linux/amd64
# IMAGE: autoconf
# DOCKERFILE: src/autoconf/Dockerfile
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
build-ui:
uses: ./.github/workflows/container-build.yml
with:
RELEASE: staging
ARCH: linux/amd64
IMAGE: ui
DOCKERFILE: src/ui/Dockerfile
CACHE: true
PUSH: true
IMAGE: ${{ matrix.image }}
DOCKERFILE: ${{ matrix.dockerfile }}
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Linux
# build-ubuntu:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: staging
# LINUX: ubuntu
# PACKAGE: deb
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-debian:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: staging
# LINUX: debian
# PACKAGE: deb
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-centos:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: staging
# LINUX: centos
# PACKAGE: rpm
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-fedora:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: staging
# LINUX: fedora
# PACKAGE: rpm
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# build-rhel:
# uses: ./.github/workflows/linux-build.yml
# with:
# RELEASE: staging
# LINUX: rhel
# PACKAGE: rpm
# secrets:
# DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
# DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
# PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
# PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Build Linux packages
build-packages:
strategy:
matrix:
linux: [ubuntu, debian, fedora, rhel]
include:
- linux: ubuntu
package: deb
- linux: debian
package: deb
- linux: fedora
package: rpm
- linux: rhel
package: rpm
uses: ./.github/workflows/linux-build.yml
with:
RELEASE: staging
LINUX: ${{ matrix.linux }}
PACKAGE: ${{ matrix.package }}
TEST: true
PLATFORMS: linux/amd64
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
PRIVATE_REGISTRY: ${{ secrets.PRIVATE_REGISTRY }}
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Code security
code-security:
@ -139,87 +88,41 @@ jobs:
category: "/language:${{matrix.language}}"
# Create infrastructures
# create-infra-docker:
# needs: [code-security, build-bw, build-scheduler]
# uses: ./.github/workflows/staging-create-infra.yml
# with:
# TYPE: docker
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# create-infra-autoconf:
# needs: [code-security, build-bw, build-scheduler, build-autoconf]
# uses: ./.github/workflows/staging-create-infra.yml
# with:
# TYPE: autoconf
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# create-infra-swarm:
# needs: [code-security, build-bw, build-scheduler, build-autoconf]
# uses: ./.github/workflows/staging-create-infra.yml
# with:
# TYPE: swarm
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# create-infra-k8s:
# needs: [code-security, build-bw, build-scheduler, build-autoconf]
# uses: ./.github/workflows/staging-create-infra.yml
# with:
# TYPE: k8s
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# create-infra-linux:
# needs:
# [
# code-security,
# build-ubuntu,
# build-debian,
# build-fedora,
# build-rhel,
# ]
# uses: ./.github/workflows/staging-create-infra.yml
# with:
# TYPE: linux
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
create-infras:
needs: [code-security, build-containers, build-packages]
strategy:
matrix:
type: [docker, autoconf, swarm, k8s, linux]
uses: ./.github/workflows/staging-create-infra.yml
with:
TYPE: ${{ matrix.type }}
secrets:
CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# Perform tests
# tests-docker:
# needs: [create-infra-docker]
# uses: ./.github/workflows/staging-tests.yml
# with:
# TYPE: docker
# RUNS_ON: "['self-hosted', 'bw-docker']"
# secrets: inherit
# tests-autoconf:
# needs: [create-infra-autoconf]
# uses: ./.github/workflows/staging-tests.yml
# with:
# TYPE: autoconf
# RUNS_ON: "['self-hosted', 'bw-autoconf']"
# secrets: inherit
# tests-swarm:
# needs: [create-infra-swarm]
# uses: ./.github/workflows/staging-tests.yml
# with:
# TYPE: swarm
# RUNS_ON: "['self-hosted', 'bw-swarm']"
# secrets: inherit
# tests-k8s:
# needs: [create-infra-k8s]
# uses: ./.github/workflows/staging-tests.yml
# with:
# TYPE: k8s
# RUNS_ON: "['ubuntu-latest']"
# secrets: inherit
# tests-linux:
# needs: [create-infra-linux]
# uses: ./.github/workflows/staging-tests.yml
# with:
# TYPE: linux
# RUNS_ON: "['self-hosted', 'bw-linux']"
# secrets: inherit
staging-tests:
needs: [create-infras]
strategy:
matrix:
type: [docker, autoconf, swarm, k8s, linux]
include:
- type: docker
runs_on: "['self-hosted', 'bw-docker']"
- type: autoconf
runs_on: "['self-hosted', 'bw-autoconf']"
- type: swarm
runs_on: "['self-hosted', 'bw-swarm']"
- type: k8s
runs_on: "['ubuntu-latest']"
- type: linux
runs_on: "['self-hosted', 'bw-linux']"
uses: ./.github/workflows/staging-tests.yml
with:
TYPE: ${{ matrix.type }}
RUNS_ON: ${{ matrix.runs_on }}
secrets: inherit
tests-ui:
needs: [code-security, build-bw, build-scheduler, build-ui]
needs: [create-infras]
uses: ./.github/workflows/tests-ui.yml
with:
RELEASE: staging
@ -228,43 +131,14 @@ jobs:
PRIVATE_REGISTRY_TOKEN: ${{ secrets.PRIVATE_REGISTRY_TOKEN }}
# Delete infrastructures
# delete-infra-docker:
# if: ${{ always() }}
# needs: [tests-docker]
# uses: ./.github/workflows/staging-delete-infra.yml
# with:
# TYPE: docker
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# delete-infra-autoconf:
# if: ${{ always() }}
# needs: [tests-autoconf]
# uses: ./.github/workflows/staging-delete-infra.yml
# with:
# TYPE: autoconf
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# delete-infra-swarm:
# if: ${{ always() }}
# needs: [tests-swarm]
# uses: ./.github/workflows/staging-delete-infra.yml
# with:
# TYPE: swarm
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# delete-infra-k8s:
# if: ${{ always() }}
# needs: [tests-k8s]
# uses: ./.github/workflows/staging-delete-infra.yml
# with:
# TYPE: k8s
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
# delete-infra-linux:
# if: ${{ always() }}
# needs: [tests-linux]
# uses: ./.github/workflows/staging-delete-infra.yml
# with:
# TYPE: linux
# secrets:
# CICD_SECRETS: ${{ secrets.CICD_SECRETS }}
delete-infras:
if: ${{ always() }}
needs: [create-infras]
strategy:
matrix:
type: [docker, autoconf, swarm, k8s, linux]
uses: ./.github/workflows/staging-delete-infra.yml
with:
TYPE: ${{ matrix.type }}
secrets:
CICD_SECRETS: ${{ secrets.CICD_SECRETS }}

3
.gitignore vendored
View File

@ -4,4 +4,5 @@ site/
__pycache__
env
node_modules
/src/ui/*.txt
/src/ui/*.txt
.mypy_cache

151
README.md
View File

@ -1,5 +1,5 @@
<p align="center">
<img alt="BunkerWeb logo" src="misc/logo.png" />
<img alt="BunkerWeb logo" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/misc/logo.png" />
</p>
<p align="center">
@ -17,7 +17,7 @@
&#124;
👨‍💻 <a href="https://demo.bunkerweb.io">Demo</a>
&#124;
🛡️ <a href="https://github.com/bunkerity/bunkerweb/tree/master/examples">Examples</a>
🛡️ <a href="https://github.com/bunkerity/bunkerweb/tree/v1.5.0-beta/examples">Examples</a>
&#124;
💬 <a href="https://discord.com/invite/fTf46FmtyD">Chat</a>
&#124;
@ -33,29 +33,29 @@
# BunkerWeb
<p align="center">
<img alt="overview" src="docs/assets/img/intro-overview.svg" />
<img alt="overview" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/intro-overview.svg" />
</p>
BunkerWeb is an incredible web server that is built upon the reliable [NGINX](https://nginx.org/) and designed with a primary focus on security.
BunkerWeb is a next-generation and open-source Web Application Firewall (WAF).
This web server can effortlessly be integrated into your current environment, whether you're using [Linux](#linux), [Docker](#docker), [Swarm](#swarm), [Kubernetes](#Kubernetes), and more. To ensure that your web services are "secure by default" without any added stress or effort.
Being a full-featured web server (based on [NGINX](https://nginx.org/) under the hood), it will protect your web services to make them "secure by default". BunkerWeb integrates seamlessly into your existing environments ([Linux](https://docs.bunkerweb.io/1.5.0-beta/integrations/#linux), [Docker](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker), [Swarm](https://docs.bunkerweb.io/1.5.0-beta/integrations/#swarm), [Kubernetes](https://docs.bunkerweb.io/1.5.0-beta/integrations/#kubernetes), …) and is fully configurable (don't panic, there is an [awesome web UI](https://docs.bunkerweb.io/1.5.0-beta/web-ui/) if you don't like the CLI) to meet your own use cases. In other words, cybersecurity is no longer a hassle.
With BunkerWeb, you can be assured that all security best practices are applied automatically, and you have total control over each setting to meet your unique needs. The web server comes with a table of primary [security features](#security-tuning) as part of the core, but you can easily expand it with extra features using the [plugin system](#plugins).
Overall, BunkerWeb is a great choice for anyone looking for a secure and user-friendly web server that guarantees optimum performance and uncompromised security for their web services.
BunkerWeb contains primary [security features](https://docs.bunkerweb.io/1.5.0-beta/security-tuning/) as part of the core but can be easily extended with additional ones thanks to a [plugin system](https://docs.bunkerweb.io/1.5.0-beta/plugins/).
## Why BunkerWeb ?
- **Easy integration into existing environments** : support for Linux, Docker, Swarm and Kubernetes
- **Easy integration into existing environments** : support for Linux, Docker, Swarm, Kubernetes, Ansible, Vagrant, ...
- **Highly customizable** : enable, disable and configure features easily to meet your use case
- **Secure by default** : offers out-of-the-box and hassle-free minimal security for your web services
- **Awesome web UI** : keep control of everything more efficiently without the need of the CLI
- **Plugin system** : extend BunkerWeb to meet your own use-cases
- **Free as in "freedom"** : licensed under the free [AGPLv3 license](https://www.gnu.org/licenses/agpl-3.0.en.html)
## Security features
A non-exhaustive list of security features :
- **HTTPS** support with transparent **Let's Encrypt** automation.
- **HTTPS** support with transparent **Let's Encrypt** automation
- **State-of-the-art web security** : HTTP security headers, prevent leaks, TLS hardening, ...
- Integrated **ModSecurity WAF** with the **OWASP Core Rule Set**
- **Automatic ban** of strange behaviors based on HTTP status code
@ -64,12 +64,12 @@ A non-exhaustive list of security features :
- **Block known bad IPs** with external blacklists and DNSBL
- And much more ...
Learn more about the core security features in the [security tuning](https://docs.bunkerweb.io/latest/security-tuning) section of the documentation.
Learn more about the core security features in the [security tuning](https://docs.bunkerweb.io/1.5.0-beta/security-tuning/) section of the documentation.
## Demo
<p align="center">
<img alt="Demo GIF" src="docs/assets/img/demo.gif" />
<a href="https://www.youtube.com/watch?v=ZhYV-QELzA4" target="_blank"><img alt="BunkerWeb demo" src="https://yt-embed.herokuapp.com/embed?v=ZhYV-QELzA4" /></a>
</p>
A demo website protected with BunkerWeb is available at [demo.bunkerweb.io](https://demo.bunkerweb.io). Feel free to visit it and perform some security tests.
@ -77,10 +77,10 @@ A demo website protected with BunkerWeb is available at [demo.bunkerweb.io](http
# Concepts
<p align="center">
<img alt="BunkerWeb logo" src="docs/assets/img/concepts.svg" />
<img alt="BunkerWeb logo" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/concepts.svg" />
</p>
You will find more information about the key concepts of BunkerWeb in the [documentation](https://docs.bunkerweb.io/latest/concepts).
You will find more information about the key concepts of BunkerWeb in the [documentation](https://docs.bunkerweb.io/1.5.0-beta/concepts).
## Integrations
@ -88,18 +88,19 @@ The first concept is the integration of BunkerWeb into the target environment. W
The following integrations are officially supported :
- [Docker](https://docs.bunkerweb.io/latest/integrations/#docker)
- [Docker autoconf](https://docs.bunkerweb.io/latest/integrations/#docker-autoconf)
- [Swarm](https://docs.bunkerweb.io/latest/integrations/#swarm)
- [Kubernetes](https://docs.bunkerweb.io/latest/integrations/#kubernetes)
- [Linux](https://docs.bunkerweb.io/latest/integrations/#linux)
- [Ansible](https://docs.bunkerweb.io/latest/integrations/#ansible)
- [Docker](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker)
- [Docker autoconf](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker-autoconf)
- [Swarm](https://docs.bunkerweb.io/1.5.0-beta/integrations/#swarm)
- [Kubernetes](https://docs.bunkerweb.io/1.5.0-beta/integrations/#kubernetes)
- [Linux](https://docs.bunkerweb.io/1.5.0-beta/integrations/#linux)
- [Ansible](https://docs.bunkerweb.io/1.5.0-beta/integrations/#ansible)
- [Vagrant](https://docs.bunkerweb.io/1.5.0-beta/integrations/#vagrant)
## Settings
Once BunkerWeb is integrated into your environment, you will need to configure it to serve and protect your web applications.
Configuration of BunkerWeb is done using what we called the "settings" or "variables". Each setting is identified by a name like `AUTO_LETS_ENCRYPT` or `USE_ANTIBOT` for example. You can assign values to the settings to configure BunkerWeb.
The configuration of BunkerWeb is done by using what we call the "settings" or "variables". Each setting is identified by a name such as `AUTO_LETS_ENCRYPT` or `USE_ANTIBOT`. You can assign values to the settings to configure BunkerWeb.
Here is a dummy example of a BunkerWeb configuration :
@ -125,115 +126,149 @@ When multisite mode is enabled, BunkerWeb will serve and protect multiple web ap
## Custom configurations
Because meeting all the use cases only using the settings is not an option (even with [external plugins](https://docs.bunkerweb.io/latest/plugins)), you can use custom configurations to solve your specific challenges.
Because meeting all the use cases only using the settings is not an option (even with [external plugins](https://docs.bunkerweb.io/1.5.0-beta/plugins)), you can use custom configurations to solve your specific challenges.
Under the hood, BunkerWeb uses the notorious NGINX web server, that's why you can leverage its configuration system for your specific needs. Custom NGINX configurations can be included in different [contexts](https://docs.nginx.com/nginx/admin-guide/basic-functionality/managing-configuration-files/#contexts) like HTTP or server (all servers and/or specific server block).
Another core component of BunkerWeb is the ModSecurity Web Application Firewall : you can also use custom configurations to fix some false positives or add custom rules for example.
## Database
State of the current configuration of BunkerWeb is stored in a backend database which contains the following data :
- Settings defined for all the services
- Custom configurations
- BunkerWeb instances
- Metadata about jobs execution
- Cached files
The following backend databases are supported : SQLite, MariaDB, MySQL and PostgreSQL
## Scheduler
To make things automagically work together, a dedicated service called the scheduler is in charge of :
- Storing the settings and custom configurations inside the database
- Executing various tasks (called jobs)
- Generating a configuration which is understood by BunkerWeb
- Being the intermediary for other services (like web UI or autoconf)
In other words, the scheduler is the brain of BunkerWeb.
# Setup
## Docker
<p align="center">
<img alt="Docker" src="docs/assets/img/integration-docker.svg" />
<img alt="Docker" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-docker.svg" />
</p>
We provide ready to use prebuilt images for x64, x86, armv7 and arm64 platforms on [Docker Hub](https://hub.docker.com/r/bunkerity/bunkerweb) using the `bunkerity/bunkerweb` tag.
We provide ready to use prebuilt images for x64, x86, armv7 and arm64 platforms on [Docker Hub](https://hub.docker.com/u/bunkerity).
Usage and configuration of the BunkerWeb container are based on :
Docker integration key concepts are :
- **Environment variables** to configure BunkerWeb and meet your use cases
- **Volume** to cache important data and mount custom configuration files
- **Environment variables** to configure BunkerWeb
- **Scheduler** container to store configuration and execute jobs
- **Networks** to expose ports for clients and connect to upstream web services
You will find more information in the [Docker integration section](https://docs.bunkerweb.io/latest/integrations/#docker) of the documentation.
You will find more information in the [Docker integration section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker) of the documentation.
## Docker autoconf
<p align="center">
<img alt="Docker autoconf" src="docs/assets/img/integration-autoconf.svg" />
<img alt="Docker autoconf" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-autoconf.svg" />
</p>
The downside of using environment variables is that the container needs to be recreated each time there is an update which is not very convenient. To counter that issue, you can use another image called **autoconf** which will listen for Docker events and automatically reconfigure BunkerWeb in real-time without recreating the container.
Instead of defining environment variables for the BunkerWeb container, you simply add **labels** to your web applications containers and the **autoconf** will "automagically" take care of the rest.
You will find more information in the [Docker autoconf section](https://docs.bunkerweb.io/latest/integrations/#docker-autoconf) of the documentation.
You will find more information in the [Docker autoconf section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker-autoconf) of the documentation.
## Swarm
<p align="center">
<img alt="Swarm" src="docs/assets/img/integration-swarm.svg" />
<img alt="Swarm" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-swarm.svg" />
</p>
To automatically configure BunkerWeb instances, a special service, called **autoconf**, will be scheduled on a manager node. That service will listen for Docker Swarm events like service creation or deletion and automatically configure the **BunkerWeb instances** in real-time without downtime.
To automatically configure BunkerWeb instances, a special service, called **autoconf**, will listen for Docker Swarm events like service creation or deletion and automatically configure the **BunkerWeb instances** in real-time without downtime.
Like the [Docker autoconf integration](#docker-autoconf), configuration for web services is defined using labels starting with the special **bunkerweb.** prefix.
Like the [Docker autoconf integration](https://docs.bunkerweb.io/1.5.0-beta/integrations/#docker-autoconf), configuration for web services is defined using labels starting with the special **bunkerweb.** prefix.
The recommended setup is to schedule the **BunkerWeb service** as a **global service** on all worker nodes and the **autoconf service** as a **single replicated service** on a manager node.
You will find more information in the [Swarm section](https://docs.bunkerweb.io/latest/integrations/#swarm) of the documentation.
You will find more information in the [Swarm section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#swarm) of the documentation.
## Kubernetes
<p align="center">
<img alt="Kubernetes" src="docs/assets/img/integration-kubernetes.svg" />
<img alt="Kubernetes" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-kubernetes.svg" />
</p>
The autoconf acts as an [Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) and will configure the BunkerWeb instances according to the [Ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/). It also monitors other Kubernetes objects like [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) for custom configurations.
You will find more information in the [Kubernetes section](https://docs.bunkerweb.io/latest/integrations/#kubernetes) of the documentation.
You will find more information in the [Kubernetes section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#kubernetes) of the documentation.
## Linux
<p align="center">
<img alt="Linux" src="docs/assets/img/integration-linux.svg" />
<img alt="Linux" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-linux.svg" />
</p>
List of supported Linux distros :
- Debian 11 "Bullseye"
- Ubuntu 22.04 "Jammy"
- Fedora 36
- CentOS Stream 8
- Fedora 37
- RHEL 8.7
Repositories of Linux packages for BunkerWeb are available on [PackageCloud](https://packagecloud.io/bunkerity/bunkerweb), they provide a bash script to automatically add and trust the repository (but you can also follow the [manual installation](https://packagecloud.io/bunkerity/bunkerweb/install) instructions if you prefer).
You will find more information in the [Linux section](https://docs.bunkerweb.io/latest/integrations/#linux) of the documentation.
You will find more information in the [Linux section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#linux) of the documentation.
## Ansible
<p align="center">
<img alt="Ansible" src="docs/assets/img/integration-ansible.svg" />
<img alt="Ansible" src="https://github.com/bunkerity/bunkerweb/raw/v1.5.0-beta/docs/assets/img/integration-ansible.svg" />
</p>
List of supported Linux distros :
- Debian 11 "Bullseye"
- Ubuntu 22.04 "Jammy"
- Fedora 36
- CentOS Stream 8
- Fedora 37
- RHEL 8.7
[Ansible](https://docs.ansible.com/ansible/latest/index.html) is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.
[Ansible](https://www.ansible.com/) is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.
A specific BunkerWeb Ansible role is available on [Ansible Galaxy](https://galaxy.ansible.com/bunkerity/bunkerweb) (source code is available [here](https://github.com/bunkerity/bunkerweb-ansible)).
You will find more information in the [Ansible section](https://docs.bunkerweb.io/latest/integrations/#ansible) of the documentation.
You will find more information in the [Ansible section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#ansible) of the documentation.
## Vagrant
We maintain ready to use Vagrant boxes hosted on Vagrant cloud for the following providers :
- vmware_desktop
- virtualbox
- libvirt
You will find more information in the [Vagrant section](https://docs.bunkerweb.io/1.5.0-beta/integrations/#vagrant) of the documentation.
# Quickstart guide
Once you have setup BunkerWeb with the integration of your choice, you can follow the [quickstart guide](https://docs.bunkerweb.io/latest/quickstart-guide/) that will cover the following common use cases :
Once you have setup BunkerWeb with the integration of your choice, you can follow the [quickstart guide](https://docs.bunkerweb.io/1.5.0-beta/quickstart-guide/) that will cover the following common use cases :
- Protecting a single HTTP application
- Protecting multiple HTTP applications
- Retrieving the real IP of clients when operating behind a load balancer
- Adding custom configurations
- Protecting generic TCP/UDP applications
- In combination with PHP
# Security tuning
BunkerWeb offers many security features that you can configure with [settings](/settings). Even if the default values of settings ensure a minimal "security by default", we strongly recommend you to tune them. By doing so you will be able to ensure a security level of your choice but also manage false positives.
BunkerWeb offers many security features that you can configure with [settings](https://docs.bunkerweb.io/1.5.0-beta/settings). Even if the default values of settings ensure a minimal "security by default", we strongly recommend you to tune them. By doing so you will be able to ensure a security level of your choice but also manage false positives.
You will find more information in the [security tuning section](https://docs.bunkerweb.io/latest/security-tuning) of the documentation.
You will find more information in the [security tuning section](https://docs.bunkerweb.io/1.5.0-beta/security-tuning) of the documentation.
# Settings
@ -243,12 +278,12 @@ As a general rule when multisite mode is enabled, if you want to apply settings
When settings are considered as "multiple", it means that you can have multiple groups of settings for the same feature by adding numbers as suffix like `REVERSE_PROXY_URL_1=/subdir`, `REVERSE_PROXY_HOST_1=http://myhost1`, `REVERSE_PROXY_URL_2=/anotherdir`, `REVERSE_PROXY_HOST_2=http://myhost2`, ... for example.
Check the [settings section](https://docs.bunkerweb.io/latest/settings) of the documentation to get the full list.
Check the [settings section](https://docs.bunkerweb.io/1.5.0-beta/settings) of the documentation to get the full list.
# Web UI
<p align="center">
<a href="https://www.youtube.com/watch?v=2n4EarhW7-Y" target="_blank"><img alt="BunkerWeb UI demo" src="https://yt-embed.herokuapp.com/embed?v=2n4EarhW7-Y" /></a>
<a href="https://www.youtube.com/watch?v=Ao20SfvQyr4" target="_blank"><img alt="BunkerWeb UI demo" src="https://yt-embed.herokuapp.com/embed?v=Ao20SfvQyr4" /></a>
</p>
The "Web UI" is a web application that helps you manage your BunkerWeb instance using a user-friendly interface instead of the command-line one.
@ -257,9 +292,11 @@ The "Web UI" is a web application that helps you manage your BunkerWeb instance
- Add, edit and delete settings for your web applications
- Add, edit and delete custom configurations for NGINX and ModSecurity
- Install and uninstall external plugins
- Explore the cached files
- Monitor jobs execution
- View the logs and search pattern
You will find more information in the [Web UI section](https://docs.bunkerweb.io/latest/web-ui) of the documentation.
You will find more information in the [Web UI section](https://docs.bunkerweb.io/1.5.0-beta/web-ui) of the documentation.
# Plugins
@ -275,7 +312,7 @@ Here is the list of "official" plugins that we maintain (see the [bunkerweb-plug
| **Slack** | 0.1 | Send security notifications to a Slack channel using a Webhook. | [bunkerweb-plugins/slack](https://github.com/bunkerity/bunkerweb-plugins/tree/main/slack) |
| **VirusTotal** | 0.1 | Automatically scans uploaded files with the VirusTotal API and denies the request when a file is detected as malicious. | [bunkerweb-plugins/virustotal](https://github.com/bunkerity/bunkerweb-plugins/tree/main/virustotal) |
You will find more information in the [plugins section](https://docs.bunkerweb.io/latest/plugins) of the documentation.
You will find more information in the [plugins section](https://docs.bunkerweb.io/1.5.0-beta/plugins) of the documentation.
# Support
@ -303,12 +340,12 @@ Please don't use [GitHub issues](https://github.com/bunkerity/bunkerweb/issues)
# License
This project is licensed under the terms of the [GNU Affero General Public License (AGPL) version 3](https://github.com/bunkerity/bunkerweb/tree/master/LICENSE.md).
This project is licensed under the terms of the [GNU Affero General Public License (AGPL) version 3](https://github.com/bunkerity/bunkerweb/tree/1.5.0-beta/LICENSE.md).
# Contribute
If you would like to contribute to the plugins you can read the [contributing guidelines](https://github.com/bunkerity/bunkerweb/tree/master/CONTRIBUTING.md) to get started.
If you would like to contribute to the plugins you can read the [contributing guidelines](https://github.com/bunkerity/bunkerweb/tree/1.5.0-beta/CONTRIBUTING.md) to get started.
# Security policy
We take security bugs as serious issues and encourage responsible disclosure, see our [security policy](https://github.com/bunkerity/bunkerweb/tree/master/SECURITY.md) for more information.
We take security bugs as serious issues and encourage responsible disclosure, see our [security policy](https://github.com/bunkerity/bunkerweb/tree/1.5.0-beta/SECURITY.md) for more information.

View File

@ -567,6 +567,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always
@ -816,7 +817,7 @@ List of supported Linux distros :
- Debian 11 "Bullseye"
- Ubuntu 22.04 "Jammy"
- Fedora 37
- Fedora 38
- RedHat Enterprise Linux (RHEL) 8.7
Please note that you will need to **install NGINX 1.24.0 before BunkerWeb**. For all distros, except Fedora, using prebuilt packages from [official NGINX repository](https://nginx.org/en/linux_packages.html) is mandatory. Compiling NGINX from source or using packages from different repositories won't work with the official prebuilt packages of BunkerWeb but you can build it from source.
@ -899,7 +900,7 @@ Repositories of Linux packages for BunkerWeb are available on [PackageCloud](htt
sudo dnf install -y nginx-1.24.0
```
And finally install BunkerWeb 1.5.0-beta :
And finally install BunkerWeb 1.5.0_beta :
```shell
curl -s https://packagecloud.io/install/repositories/bunkerity/bunkerweb/script.rpm.sh | \
@ -949,7 +950,7 @@ Repositories of Linux packages for BunkerWeb are available on [PackageCloud](htt
dnf install -y epel-release && \
curl -s https://packagecloud.io/install/repositories/bunkerity/bunkerweb/script.rpm.sh | sudo bash && \
sudo dnf check-update && \
sudo dnf install -y bunkerweb-1.5.0-beta
sudo dnf install -y bunkerweb-1.5.0_beta
```
To prevent upgrading NGINX and/or BunkerWeb packages when executing `dnf upgrade`, you can use the following command :
@ -1082,13 +1083,12 @@ List of supported Linux distros :
- Debian 11 "Bullseye"
- Ubuntu 22.04 "Jammy"
- Fedora 37
- CentOS Stream 8
- Fedora 38
- RedHat Enterprise Linux (RHEL) 8.7
[Ansible](https://docs.ansible.com/ansible/latest/index.html) is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.
A specific BunkerWeb Ansible role is available on [Ansible Galaxy](https://galaxy.ansible.com/bunkerity/bunkerweb) (source code is available [here](https://github.com/bunkerity/bunkerweb-ansible)).
A specific BunkerWeb Ansible role is available on [Ansible Galaxy](https://galaxy.ansible.com/bunkerity/bunkerweb).
First of all, download the role from ansible-galaxy :
```shell

View File

@ -522,6 +522,7 @@ Because the web UI is a web application, the recommended installation procedure
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -46,6 +46,7 @@ spec:
bunkerweb.io/INSTANCE: "yes"
spec:
containers:
# using bunkerweb as name is mandatory
- name: bunkerweb
image: bunkerity/bunkerweb:1.5.0-beta
imagePullPolicy: Always

View File

@ -19,4 +19,5 @@ for example in examples/* ; do
sed -i "s@${OLD_VERSION}@${NEW_VERSION}@g" ${example}/*.yml
done
# docs
# TODO : replace X.Y.Z_beta with X.Y.Z-beta (rpm install)
sed -i "s@${OLD_VERSION}@${NEW_VERSION}@g" docs/*.md

View File

@ -9,7 +9,7 @@ RUN mkdir -p /usr/share/bunkerweb/deps && \
rm -rf /tmp/req
# Install dependencies
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev && \
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev openssl-dev cargo postgresql-dev && \
pip install --no-cache-dir --upgrade pip && \
pip install wheel && \
mkdir -p /usr/share/bunkerweb/deps/python && \
@ -37,7 +37,6 @@ RUN apk add --no-cache bash && \
mkdir -p /etc/bunkerweb && \
mkdir -p /data/cache && ln -s /data/cache /var/cache/bunkerweb && \
mkdir -p /data/lib && ln -s /data/lib /var/lib/bunkerweb && \
mkdir -p /data/cache/letsencrypt && ln -s /data/cache/letsencrypt /etc/letsencrypt && \
mkdir -p /data/www && ln -s /data/www /var/www/html && \
for dir in $(echo "configs plugins") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/etc/bunkerweb/${dir}" ; done && \
for dir in $(echo "configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs") ; do mkdir "/data/${dir}" ; done && \

View File

@ -41,10 +41,17 @@ class IngressController(Controller, ConfigCaller):
health = True
break
instance["health"] = health
instance["env"] = {
env.name: env.value or ""
for env in controller_instance.spec.containers[0].env
}
instance["env"] = {}
pod = None
for container in controller_instance.spec.containers:
if container.name == "bunkerweb":
pod = container
break
if not pod:
self.__logger.warning(f"Missing container bunkerweb in pod {controller_instance.metadata.name}")
else:
for env in pod.env:
instance["env"][env.name] = env.value
for controller_service in self._get_controller_services():
if controller_service.metadata.annotations:
for (
@ -156,8 +163,16 @@ class IngressController(Controller, ConfigCaller):
):
continue
pod = None
for container in instance.spec.containers:
if container.name == "bunkerweb":
pod = container
break
if not pod :
continue
variables = {
env.name: env.value or "" for env in instance.spec.containers[0].env
env.name: env.value or "" for env in pod.env
}
if "SERVER_NAME" in variables and variables["SERVER_NAME"].strip():

View File

@ -50,7 +50,6 @@ RUN apk add --no-cache pcre bash python3 && \
mkdir -p /var/www/html && \
mkdir -p /etc/bunkerweb && \
mkdir -p /data/cache && ln -s /data/cache /var/cache/bunkerweb && \
mkdir -p /data/cache/letsencrypt && ln -s /data/cache/letsencrypt /etc/letsencrypt && \
for dir in $(echo "configs plugins") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/etc/bunkerweb/${dir}" ; done && \
for dir in $(echo "configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs") ; do mkdir "/data/${dir}" ; done && \
chown -R root:nginx /data && \

View File

@ -168,7 +168,21 @@ utils.ip_is_global = function(ip)
"224.0.0.0/4",
"233.252.0.0/24",
"240.0.0.0/4",
"255.255.255.255/32"
"255.255.255.255/32",
"::/128",
"::1/128",
"::ffff:0:0/96",
"::ffff:0:0:0/96",
"64:ff9b::/96",
"64:ff9b:1::/48",
"100::/64",
"2001:0000::/32",
"2001:20::/28",
"2001:db8::/32",
"2002::/16",
"fc00::/7",
"fe80::/10",
"ff00::/8"
}
-- Instantiate ipmatcher
local ipm, err = ipmatcher.new(reserved_ips)

Binary file not shown.

Binary file not shown.

View File

@ -15,6 +15,9 @@ server {
include /etc/bunkerweb/configs/server-http/{{ SERVER_NAME.split(" ")[0] }}/*.conf;
{% endif %}
# reason variable
set $reason '';
# include LUA files
include {{ NGINX_PREFIX }}set-lua.conf;
include {{ NGINX_PREFIX }}access-lua.conf;

View File

@ -257,6 +257,12 @@
</p></footer>
</main>
<!-- end content -->
<script>
function send_challenge(token) {
document.getElementById("token").value = token;
document.getElementById("form").submit();
}
</script>
</body>
</html>
{-raw-}

View File

@ -192,6 +192,8 @@ try:
if not cached:
logger.error(f"Error while caching blacklist : {err}")
status = 2
else:
status = 1
except:
status = 2
logger.error(

View File

@ -1,9 +1,9 @@
local class = require "middleclass"
local plugin = require "bunkerweb.plugin"
local class = require "middleclass"
local plugin = require "bunkerweb.plugin"
local utils = require "bunkerweb.utils"
local datastore = require "bunkerweb.datastore"
local cjson = require "cjson"
local http = require "resty.http"
local cjson = require "cjson"
local http = require "resty.http"
local bunkernet = class("bunkernet", plugin)
@ -15,6 +15,8 @@ function bunkernet:initialize()
local id, err = self.datastore:get("plugin_bunkernet_id")
if id then
self.bunkernet_id = id
self.version = ngx.ctx.bw.version
self.integration = ngx.ctx.bw.integration
else
self.logger:log(ngx.ERR, "can't get BunkerNet ID from datastore : " .. err)
end
@ -58,7 +60,7 @@ function bunkernet:init()
ret = false
else
for line in f:lines() do
if utils.is_ipv4(line) and utils.ip_is_global(line) then
if (utils.is_ipv4(line) or utils.is_ipv6(line)) and utils.ip_is_global(line) then
table.insert(db.ip, line)
i = i + 1
end
@ -72,11 +74,54 @@ function bunkernet:init()
if not ok then
return self:ret(false, "can't store bunkernet database into datastore : " .. err)
end
return self:ret(true, "successfully connected to the bunkernet service " .. self.server .. " with machine ID " .. id .. " and " .. tostring(i) .. " bad IPs in database")
return self:ret(true,
"successfully connected to the bunkernet service " ..
self.variables["BUNKERNET_SERVER"] .. " with machine ID " .. id .. " and " .. tostring(i) .. " bad IPs in database")
end
function bunkernet:access()
-- Check if not loading
if self.is_loading then
return self:ret(true, "bunkerweb is loading")
end
-- Check if enabled
if self.variables["USE_BUNKERNET"] ~= "yes" then
return self:ret(true, "bunkernet not activated")
end
-- Check if BunkerNet ID is generated
if not self.bunkernet_id then
return self:ret(false, "bunkernet ID is not generated")
end
-- Check if IP is global
if not ngx.ctx.bw.ip_is_global then
return self:ret(true, "IP is not global")
end
-- Check if whitelisted
if ngx.ctx.bw.is_whitelisted == "yes" then
return self:ret(true, "client is whitelisted")
end
-- Extract DB
local db, err = self.datastore:get("plugin_bunkernet_db")
if db then
db = cjson.decode(db)
-- Check if is IP is present
if #db.ip > 0 then
local present, err = utils.is_ip_in_networks(ngx.ctx.bw.remote_addr, db.ip)
if present == nil then
return self:ret(false, "can't check if ip is in db : " .. err)
end
if present then
return self:ret(true, "ip is in db", utils.get_deny_status())
end
end
else
return self:ret(false, "can't get bunkernet db " .. err)
end
return self:ret(true, "not in db")
end
function bunkernet:log(bypass_use_bunkernet)
-- Check if not loading is needed
-- Check if not loading
if self.is_loading then
return self:ret(true, "bunkerweb is loading")
end
@ -103,10 +148,8 @@ function bunkernet:log(bypass_use_bunkernet)
return self:ret(true, "IP is not global")
end
-- TODO : check if IP has been reported recently
self.integration = ngx.ctx.bw.integration
self.version = ngx.ctx.bw.version
local function report_callback(premature, obj, ip, reason, method, url, headers)
local ok, err, status, data = obj:report(ip, reason, method, url, headers, obj.ctx.integration, obj.ctx.version)
local ok, err, status, data = obj:report(ip, reason, method, url, headers)
if status == 429 then
obj.logger:log(ngx.WARN, "bunkernet API is rate limiting us")
elseif not ok then
@ -159,9 +202,9 @@ function bunkernet:request(method, url, data)
return false, "can't instantiate http object : " .. err, nil, nil
end
local all_data = {
id = self.id,
integration = self.integration,
version = self.version
id = self.bunkernet_id,
version = self.version,
integration = self.integration
}
for k, v in pairs(data) do
all_data[k] = v
@ -203,4 +246,4 @@ function bunkernet:report(ip, reason, method, url, headers)
return self:request("POST", "/report", data)
end
return bunkernet
return bunkernet

View File

@ -18,10 +18,10 @@ sys_path.extend(
from bunkernet import data
from Database import Database
from logger import setup_logger
from jobs import cache_file, cache_hash, file_hash, is_cached_file
from jobs import cache_file, cache_hash, file_hash, is_cached_file, get_file_in_db
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
status = 0
exit_status = 0
try:
# Check if at least a server has BunkerNet activated
@ -43,17 +43,29 @@ try:
logger.info("BunkerNet is not activated, skipping download...")
_exit(0)
# Create directory if it doesn't exist
Path("/var/cache/bunkerweb/bunkernet").mkdir(parents=True, exist_ok=True)
Path("/var/tmp/bunkerweb").mkdir(parents=True, exist_ok=True)
# Create empty file in case it doesn't exist
if not Path("/var/cache/bunkerweb/bunkernet/ip.list").is_file():
Path("/var/cache/bunkerweb/bunkernet/ip.list").write_text("")
# Get ID from cache
bunkernet_id = None
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
# Create directory if it doesn't exist
Path("/var/cache/bunkerweb/bunkernet").mkdir(parents=True, exist_ok=True)
# Create empty file in case it doesn't exist
if not Path("/var/tmp/bunkerweb/bunkernet-ip.list").is_file():
Path("/var/tmp/bunkerweb/bunkernet-ip.list").write_bytes(b"")
if db:
bunkernet_id = get_file_in_db("bunkernet-register", "instance.id", db)
if bunkernet_id:
Path("/var/cache/bunkerweb/bunkernet/bunkernet.id").write_text(
bunkernet_id.decode()
)
logger.info("Successfully retrieved BunkerNet ID from db cache")
else:
logger.info("No BunkerNet ID found in db cache")
# Check if ID is present
if not Path("/var/cache/bunkerweb/bunkernet/instance.id").is_file():
@ -63,11 +75,14 @@ try:
_exit(2)
# Don't go further if the cache is fresh
if is_cached_file("/var/cache/bunkerweb/bunkernet/ip.list", "day", db):
logger.info(
"BunkerNet list is already in cache, skipping download...",
)
_exit(0)
if db:
if is_cached_file("/var/cache/bunkerweb/bunkernet/ip.list", "day", db):
logger.info(
"BunkerNet list is already in cache, skipping download...",
)
_exit(0)
exit_status = 1
# Download data
logger.info("Downloading BunkerNet data ...")
@ -94,13 +109,14 @@ try:
logger.error(
f"Received invalid data from BunkerNet API while sending db request : {data}",
)
_exit(1)
_exit(2)
if data["result"] != "ok":
logger.error(
f"Received error from BunkerNet API while sending db request : {data['data']}, removing instance ID",
)
_exit(2)
logger.info("Successfully downloaded data from BunkerNet API")
# Writing data to file
@ -130,10 +146,10 @@ try:
logger.info("Successfully saved BunkerNet data")
status = 1
exit_status = 1
except:
status = 2
exit_status = 2
logger.error(f"Exception while running bunkernet-data.py :\n{format_exc()}")
sys_exit(status)
sys_exit(exit_status)

View File

@ -19,9 +19,10 @@ sys_path.extend(
from bunkernet import register, ping, get_id
from Database import Database
from logger import setup_logger
from jobs import get_file_in_db, set_file_in_db, del_file_in_db
logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
status = 0
exit_status = 0
try:
# Check if at least a server has BunkerNet activated
@ -51,8 +52,24 @@ try:
# Create directory if it doesn't exist
Path("/var/cache/bunkerweb/bunkernet").mkdir(parents=True, exist_ok=True)
# Ask an ID if needed
# Get ID from cache
bunkernet_id = None
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
if db:
bunkernet_id = get_file_in_db("bunkernet-register", "instance.id", db)
if bunkernet_id:
Path("/var/cache/bunkerweb/bunkernet/instance.id").write_text(
bunkernet_id.decode()
)
logger.info("Successfully retrieved BunkerNet ID from db cache")
else:
logger.info("No BunkerNet ID found in db cache")
# Register instance
registered = False
if not Path("/var/cache/bunkerweb/bunkernet/instance.id").is_file():
logger.info("Registering instance on BunkerNet API ...")
ok, status, data = register()
@ -60,7 +77,7 @@ try:
logger.error(
f"Error while sending register request to BunkerNet API : {data}"
)
_exit(1)
_exit(2)
elif status == 429:
logger.warning(
"BunkerNet API is rate limiting us, trying again later...",
@ -78,19 +95,22 @@ try:
logger.error(
f"Received invalid data from BunkerNet API while sending db request : {data}, retrying later...",
)
_exit(1)
_exit(2)
if status != 200:
logger.error(
f"Error {status} from BunkerNet API : {data['data']}",
)
_exit(1)
_exit(2)
elif data.get("result", "ko") != "ok":
logger.error(
f"Received error from BunkerNet API while sending register request : {data.get('data', {})}"
)
_exit(1)
_exit(2)
bunkernet_id = data["data"]
Path("/var/cache/bunkerweb/bunkernet/instance.id").write_text(bunkernet_id)
registered = True
exit_status = 1
logger.info(
f"Successfully registered on BunkerNet API with instance id {data['data']}"
)
@ -100,6 +120,15 @@ try:
sleep(1)
# Update cache with new bunkernet ID
if db and registered:
with open("/var/cache/bunkerweb/bunkernet/instance.id", "rb") as f:
cached, err = set_file_in_db(f"bunkernet-register", f"instance.id", f, db)
if not cached:
logger.error(f"Error while saving BunkerNet data to db cache : {err}")
else:
logger.info("Successfully saved BunkerNet data to db cache")
# Ping
logger.info("Checking connectivity with BunkerNet API ...")
bunkernet_ping = False
@ -118,11 +147,14 @@ try:
logger.warning(
"BunkerNet has banned this instance, retrying a register later...",
)
_exit(2)
elif status == 401:
logger.warning(
"Instance ID is not registered, removing it and retrying a register later...",
)
Path("/var/cache/bunkerweb/bunkernet/instance.id").unlink()
if db:
del_file_in_db("bunkernet-register", "instance.id", db)
_exit(2)
try:
@ -131,11 +163,11 @@ try:
logger.error(
f"Received invalid data from BunkerNet API while sending db request : {data}, retrying later...",
)
_exit(1)
_exit(2)
if data.get("result", "ko") != "ok":
logger.error(
f"Received error from BunkerNet API while sending ping request : {data.get('data', {})}, removing instance ID",
f"Received error from BunkerNet API while sending ping request : {data.get('data', {})}",
)
retry = True
if not retry:
@ -144,35 +176,14 @@ try:
logger.warning("Waiting 1s and trying again ...")
sleep(1)
if bunkernet_ping and status != 403:
logger.info("Connectivity with BunkerWeb is successful !")
status = 1
if not Path("/var/cache/bunkerweb/bunkernet/instance.id").is_file():
Path("/var/cache/bunkerweb/bunkernet/instance.id").write_text(bunkernet_id)
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
lock = Lock()
# Update db
with lock:
err = db.update_job_cache(
"bunkernet-register",
None,
"instance.id",
bunkernet_id.encode("utf-8"),
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
if bunkernet_ping:
logger.info("Connectivity with BunkerNet is successful !")
else:
logger.error("Connectivity with BunkerWeb failed ...")
status = 2
logger.error("Connectivity with BunkerNet failed ...")
exit_status = 2
except:
status = 2
exit_status = 2
logger.error(f"Exception while running bunkernet-register.py :\n{format_exc()}")
sys_exit(status)
sys_exit(exit_status)

View File

@ -74,7 +74,7 @@ function country:access()
if not ok then
return self:ret(false, "error while adding item to cache : " .. err)
end
return self:ret(true, "client IP " .. ngx.ctx.bw.remote_addr .. " is blacklisted (country = " .. country .. ")", true, utils.get_deny_status())
return self:ret(true, "client IP " .. ngx.ctx.bw.remote_addr .. " is blacklisted (country = " .. country .. ")", utils.get_deny_status())
end
end
end

View File

@ -174,6 +174,8 @@ try:
if not cached:
logger.error(f"Error while caching greylist : {err}")
status = 2
else:
status = 1
except:
status = 2
logger.error(

View File

@ -161,6 +161,7 @@ try:
f"Couldn't update external plugins to database: {err}",
)
status = 1
logger.info("External plugins downloaded and installed")
except:

View File

@ -10,8 +10,8 @@ location ~ ^/.well-known/acme-challenge/ {
listen 0.0.0.0:{{ HTTPS_PORT }} ssl {% if HTTP2 == "yes" %}http2{% endif %} {% if USE_PROXY_PROTOCOL == "yes" %}proxy_protocol{% endif %};
# TLS config
ssl_certificate /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/privkey.pem;
ssl_certificate /var/cache/bunkerweb/letsencrypt/etc/live/{{ SERVER_NAME.split(" ")[0] }}/fullchain.pem;
ssl_certificate_key /var/cache/bunkerweb/letsencrypt/etc/live/{{ SERVER_NAME.split(" ")[0] }}/privkey.pem;
ssl_protocols {{ SSL_PROTOCOLS }};
ssl_prefer_server_ciphers on;
ssl_session_tickets off;
@ -22,4 +22,4 @@ ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
{% endif %}
{% endif %}

View File

@ -4,8 +4,8 @@
listen 0.0.0.0:{{ LISTEN_STREAM_PORT_SSL }} ssl {% if USE_UDP == "yes" %} udp {% endif %}{% if USE_PROXY_PROTOCOL == "yes" %} proxy_protocol {% endif %};
# TLS config
ssl_certificate /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ SERVER_NAME.split(" ")[0] }}/privkey.pem;
ssl_certificate /var/cache/bunkerweb/letsencrypt/etc/live/{{ SERVER_NAME.split(" ")[0] }}/fullchain.pem;
ssl_certificate_key /var/cache/bunkerweb/letsencrypt/etc/live/{{ SERVER_NAME.split(" ")[0] }}/privkey.pem;
ssl_protocols {{ SSL_PROTOCOLS }};
ssl_prefer_server_ciphers on;
ssl_session_tickets off;
@ -16,4 +16,4 @@ ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
{% endif %}
{% endif %}

View File

@ -47,7 +47,7 @@ try:
tgz = BytesIO()
with tar_open(mode="w:gz", fileobj=tgz) as tf:
tf.add("/var/cache/bunkerweb/letsencrypt", arcname=".")
tf.add("/var/cache/bunkerweb/letsencrypt/etc", arcname="etc")
tgz.seek(0, 0)
files = {"archive.tar.gz": tgz}
@ -102,7 +102,7 @@ try:
# Linux case
else:
proc = run(
["/etc/init.d/nginx", "reload"],
["sudo", "/usr/sbin/nginx", "-s", "reload"],
stdin=DEVNULL,
stderr=STDOUT,
)

View File

@ -1,11 +1,14 @@
#!/usr/bin/python3
from os import environ, getenv
from os import environ, getenv, listdir
from pathlib import Path
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from threading import Lock
from traceback import format_exc
from tarfile import open as tfopen
from io import BytesIO
from shutil import rmtree
sys_path.extend(
(
@ -17,8 +20,9 @@ sys_path.extend(
from Database import Database
from logger import setup_logger
from jobs import get_file_in_db, set_file_in_db
logger = setup_logger("LETS-ENCRYPT", getenv("LOG_LEVEL", "INFO"))
logger = setup_logger("LETS-ENCRYPT.new", getenv("LOG_LEVEL", "INFO"))
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
@ -33,6 +37,9 @@ def certbot_new(domains, email):
[
"/usr/share/bunkerweb/deps/python/bin/certbot",
"certonly",
"--config-dir=/var/cache/bunkerweb/letsencrypt/etc",
"--work-dir=/var/cache/bunkerweb/letsencrypt/lib",
"--logs-dir=/var/cache/bunkerweb/letsencrypt/log",
"--manual",
"--preferred-challenges=http",
"--manual-auth-hook",
@ -54,7 +61,30 @@ def certbot_new(domains, email):
return proc.returncode
status = 0
try:
# Create directory if it doesn't exist
Path("/var/cache/bunkerweb/letsencrypt").mkdir(parents=True, exist_ok=True)
# Extract letsencrypt folder if it exists in db
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
if db:
tgz = get_file_in_db("certbot-new", "folder.tgz", db)
if tgz:
# Delete folder if needed
if len(listdir("/var/cache/bunkerweb/letsencrypt")) > 0:
rmtree("/var/cache/bunkerweb/letsencrypt", ignore_errors=True)
# Extract it
with tfopen(name="folder.tgz", mode="r:gz", fileobj=BytesIO(tgz)) as tf:
tf.extractall("/var/cache/bunkerweb/letsencrypt")
logger.info("Successfully retrieved Let's Encrypt data from db cache")
else:
logger.info("No Let's Encrypt data found in db cache")
# Multisite case
if getenv("MULTISITE", "no") == "yes":
for first_server in getenv("SERVER_NAME", "").split(" "):
@ -72,7 +102,9 @@ try:
" ", ","
)
if Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists():
if Path(
f"/var/cache/bunkerweb/letsencrypt/{first_server}/cert.pem"
).exists():
logger.info(
f"Certificates already exists for domain(s) {domains}",
)
@ -89,36 +121,24 @@ try:
f"Asking certificates for domains : {domains} (email = {real_email}) ...",
)
if certbot_new(domains, real_email) != 0:
status = 1
status = 2
logger.error(
f"Certificate generation failed for domain(s) {domains} ...",
)
else:
status = 1
logger.info(
f"Certificate generation succeeded for domain(s) : {domains}"
)
if Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists():
# Update db
with lock:
err = db.update_job_cache(
"certbot-new",
first_server,
"cert.pem",
Path(
f"/etc/letsencrypt/live/{first_server}/cert.pem"
).read_bytes(),
)
if err:
logger.warning(f"Couldn't update db cache: {err}")
# Singlesite case
elif getenv("AUTO_LETS_ENCRYPT", "no") == "yes" and getenv("SERVER_NAME"):
first_server = getenv("SERVER_NAME", "").split(" ")[0]
domains = getenv("SERVER_NAME", "").replace(" ", ",")
if Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists():
if Path(
f"/var/cache/bunkerweb/letsencrypt/etc/live/{first_server}/cert.pem"
).exists():
logger.info(f"Certificates already exists for domain(s) {domains}")
else:
real_email = getenv("EMAIL_LETS_ENCRYPT", f"contact@{first_server}")
@ -132,26 +152,31 @@ try:
status = 2
logger.error(f"Certificate generation failed for domain(s) : {domains}")
else:
status = 1
logger.info(
f"Certificate generation succeeded for domain(s) : {domains}"
)
if Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists():
# Update db
with lock:
err = db.update_job_cache(
"certbot-new",
first_server,
"cert.pem",
Path(
f"/etc/letsencrypt/live/{first_server}/cert.pem"
).read_bytes(),
)
# Put new folder in cache
if db:
bio = BytesIO()
with tfopen("folder.tgz", mode="w:gz", fileobj=bio) as tgz:
tgz.add("/var/cache/bunkerweb/letsencrypt", arcname=".")
bio.seek(0)
# Put tgz in cache
cached, err = set_file_in_db(f"certbot-new", f"folder.tgz", bio, db)
if not cached:
logger.error(f"Error while saving Let's Encrypt data to db cache : {err}")
else:
logger.info("Successfully saved Let's Encrypt data to db cache")
# Delete lib and log folders to avoid sending them
if Path("/var/cache/bunkerweb/letsencrypt/lib").exists():
rmtree("/var/cache/bunkerweb/letsencrypt/lib", ignore_errors=True)
if Path("/var/cache/bunkerweb/letsencrypt/log").exists():
rmtree("/var/cache/bunkerweb/letsencrypt/log", ignore_errors=True)
if err:
logger.warning(f"Couldn't update db cache: {err}")
except:
status = 1
status = 3
logger.error(f"Exception while running certbot-new.py :\n{format_exc()}")
sys_exit(status)

View File

@ -1,19 +1,25 @@
#!/usr/bin/python3
from os import environ, getenv
from os import environ, getenv, listdir
from pathlib import Path
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
from tarfile import open as tfopen
from io import BytesIO
from shutil import rmtree
sys_path.extend(
(
"/usr/share/bunkerweb/deps/python",
"/usr/share/bunkerweb/utils",
"/usr/share/bunkerweb/db",
)
)
from logger import setup_logger
from Database import Database
from jobs import get_file_in_db, set_file_in_db
def renew(domain):
@ -22,6 +28,9 @@ def renew(domain):
[
"/usr/share/bunkerweb/deps/python/bin/certbot",
"renew",
"--config-dir=/var/cache/bunkerweb/letsencrypt/etc",
"--work-dir=/var/cache/bunkerweb/letsencrypt/lib",
"--logs-dir=/var/cache/bunkerweb/letsencrypt/log",
"--cert-name",
domain,
"--deploy-hook",
@ -34,10 +43,31 @@ def renew(domain):
return proc.returncode
logger = setup_logger("LETS-ENCRYPT", getenv("LOG_LEVEL", "INFO"))
logger = setup_logger("LETS-ENCRYPT.renew", getenv("LOG_LEVEL", "INFO"))
status = 0
try:
# Create directory if it doesn't exist
Path("/var/cache/bunkerweb/letsencrypt").mkdir(parents=True, exist_ok=True)
# Extract letsencrypt folder if it exists in db
db = Database(
logger,
sqlalchemy_string=getenv("DATABASE_URI", None),
)
if db:
tgz = get_file_in_db("certbot-new", "folder.tgz", db)
if tgz:
# Delete folder if needed
if len(listdir("/var/cache/bunkerweb/letsencrypt")) > 0:
rmtree("/var/cache/bunkerweb/letsencrypt", ignore_errors=True)
# Extract it
with tfopen(name="folder.tgz", mode="r:gz", fileobj=BytesIO(tgz)) as tf:
tf.extractall("/var/cache/bunkerweb/letsencrypt")
logger.info("Successfully retrieved Let's Encrypt data from db cache")
else:
logger.info("No Let's Encrypt data found in db cache")
if getenv("MULTISITE") == "yes":
servers = getenv("SERVER_NAME", [])
@ -52,7 +82,9 @@ try:
getenv("AUTO_LETS_ENCRYPT", "no"),
)
!= "yes"
or not Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists()
or not Path(
f"/var/cache/bunkerweb/letsencrypt/etc/live/{first_server}/cert.pem"
).exists()
):
continue
@ -64,7 +96,9 @@ try:
)
elif getenv("AUTO_LETS_ENCRYPT", "no") == "yes" and not getenv("SERVER_NAME", ""):
first_server = getenv("SERVER_NAME", "").split(" ")[0]
if Path(f"/etc/letsencrypt/live/{first_server}/cert.pem").exists():
if Path(
f"/var/cache/bunkerweb/letsencrypt/etc/live/{first_server}/cert.pem"
).exists():
ret = renew(first_server)
if ret != 0:
status = 2
@ -72,6 +106,24 @@ try:
f"Certificates renewal for {first_server} failed",
)
# Put new folder in cache
if db:
bio = BytesIO()
with tfopen("folder.tgz", mode="w:gz", fileobj=bio) as tgz:
tgz.add("/var/cache/bunkerweb/letsencrypt", arcname=".")
bio.seek(0)
# Put tgz in cache
cached, err = set_file_in_db("certbot-new", "folder.tgz", bio, db)
if not cached:
logger.error(f"Error while saving Let's Encrypt data to db cache : {err}")
else:
logger.info("Successfully saved Let's Encrypt data to db cache")
# Delete lib and log folders to avoid sending them
if Path("/var/cache/bunkerweb/letsencrypt/lib").exists():
rmtree("/var/cache/bunkerweb/letsencrypt/lib", ignore_errors=True)
if Path("/var/cache/bunkerweb/letsencrypt/log").exists():
rmtree("/var/cache/bunkerweb/letsencrypt/log", ignore_errors=True)
except:
status = 2
logger.error(f"Exception while running certbot-renew.py :\n{format_exc()}")

View File

@ -65,6 +65,7 @@ try:
)
status = 2
else:
status = 1
logger.info(
"Successfully generated self-signed certificate for default server",
)

View File

@ -107,10 +107,10 @@ try:
f"Exception while getting RealIP list from {url} :\n{format_exc()}"
)
Path("/var/tmp/bunkerweb/realip-combined.list").write_bytes(content)
Path("/var/tmp/bunkerweb/realip/combined.list").write_bytes(content)
# Check if file has changed
new_hash = file_hash("/var/tmp/bunkerweb/realip-combined.list")
new_hash = file_hash("/var/tmp/bunkerweb/realip/combined.list")
old_hash = cache_hash("/var/cache/bunkerweb/realip/combined.list", db)
if new_hash == old_hash:
logger.info("New file is identical to cache file, reload is not needed")

View File

@ -25,6 +25,8 @@ db = Database(
)
lock = Lock()
status = 0
def generate_cert(first_server, days, subj):
if Path(f"/var/cache/bunkerweb/selfsigned/{first_server}.pem").is_file():
@ -41,6 +43,8 @@ def generate_cert(first_server, days, subj):
logger.error(f"Self-signed certificate generation failed for {first_server}")
return False, 2
return True, 1
# Update db
with lock:
err = db.update_job_cache(
@ -103,10 +107,7 @@ try:
getenv("SELF_SIGNED_SSL_SUBJ", "/CN=www.example.com/"),
),
)
if not ret:
status = ret_status
elif ret_status == 1 and ret_status != 2:
status = 1
status = ret_status
# Singlesite case
elif getenv("GENERATE_SELF_SIGNED_SSL", "no") == "yes" and getenv("SERVER_NAME"):
@ -116,10 +117,7 @@ try:
getenv("SELF_SIGNED_SSL_EXPIRY", "365"),
getenv("SELF_SIGNED_SSL_SUBJ", "/CN=www.example.com/"),
)
if not ret:
status = ret_status
elif ret_status == 1 and ret_status != 2:
status = 1
status = ret_status
except:
status = 2

View File

@ -176,6 +176,8 @@ try:
if not cached:
logger.error(f"Error while caching whitelist : {err}")
status = 2
else:
status = 1
except:
status = 2
logger.error(

View File

@ -877,6 +877,12 @@ class Database:
return ""
def delete_job_cache(self, job_name: str, file_name: str):
with self.__db_session() as session:
session.query(Jobs_cache).filter_by(
job_name=job_name, file_name=file_name
).delete()
def update_job_cache(
self,
job_name: str,

View File

@ -8,7 +8,6 @@ from sqlalchemy import (
Integer,
LargeBinary,
PrimaryKeyConstraint,
SmallInteger,
String,
)
from sqlalchemy.orm import declarative_base, relationship
@ -53,7 +52,7 @@ Base = declarative_base()
class Plugins(Base):
__tablename__ = "plugins"
__tablename__ = "bw_plugins"
id = Column(String(64), primary_key=True)
order = Column(Integer, nullable=False)
@ -74,7 +73,7 @@ class Plugins(Base):
class Settings(Base):
__tablename__ = "settings"
__tablename__ = "bw_settings"
__table_args__ = (
PrimaryKeyConstraint("id", "name"),
UniqueConstraint("id"),
@ -85,7 +84,7 @@ class Settings(Base):
name = Column(String(256), primary_key=True)
plugin_id = Column(
String(64),
ForeignKey("plugins.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_plugins.id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
context = Column(CONTEXTS_ENUM, nullable=False)
@ -107,22 +106,22 @@ class Settings(Base):
class Global_values(Base):
__tablename__ = "global_values"
__tablename__ = "bw_global_values"
setting_id = Column(
String(256),
ForeignKey("settings.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_settings.id", onupdate="cascade", ondelete="cascade"),
primary_key=True,
)
value = Column(String(4096), nullable=False)
suffix = Column(SmallInteger, primary_key=True, nullable=True, default=0)
suffix = Column(Integer, primary_key=True, nullable=True, default=0)
method = Column(METHODS_ENUM, nullable=False)
setting = relationship("Settings", back_populates="global_value")
class Services(Base):
__tablename__ = "services"
__tablename__ = "bw_services"
id = Column(String(64), primary_key=True)
method = Column(METHODS_ENUM, nullable=False)
@ -137,20 +136,20 @@ class Services(Base):
class Services_settings(Base):
__tablename__ = "services_settings"
__tablename__ = "bw_services_settings"
service_id = Column(
String(64),
ForeignKey("services.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"),
primary_key=True,
)
setting_id = Column(
String(256),
ForeignKey("settings.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_settings.id", onupdate="cascade", ondelete="cascade"),
primary_key=True,
)
value = Column(String(4096), nullable=False)
suffix = Column(SmallInteger, primary_key=True, nullable=True, default=0)
suffix = Column(Integer, primary_key=True, nullable=True, default=0)
method = Column(METHODS_ENUM, nullable=False)
service = relationship("Services", back_populates="settings")
@ -158,13 +157,13 @@ class Services_settings(Base):
class Jobs(Base):
__tablename__ = "jobs"
__tablename__ = "bw_jobs"
__table_args__ = (UniqueConstraint("name", "plugin_id"),)
name = Column(String(128), primary_key=True)
plugin_id = Column(
String(64),
ForeignKey("plugins.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_plugins.id", onupdate="cascade", ondelete="cascade"),
)
file_name = Column(String(256), nullable=False)
every = Column(SCHEDULES_ENUM, nullable=False)
@ -177,7 +176,7 @@ class Jobs(Base):
class Plugin_pages(Base):
__tablename__ = "plugin_pages"
__tablename__ = "bw_plugin_pages"
id = Column(
Integer,
@ -186,7 +185,7 @@ class Plugin_pages(Base):
)
plugin_id = Column(
String(64),
ForeignKey("plugins.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_plugins.id", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
template_file = Column(LargeBinary(length=(2**32) - 1), nullable=False)
@ -198,7 +197,7 @@ class Plugin_pages(Base):
class Jobs_cache(Base):
__tablename__ = "jobs_cache"
__tablename__ = "bw_jobs_cache"
__table_args__ = (UniqueConstraint("job_name", "service_id", "file_name"),)
id = Column(
@ -208,12 +207,12 @@ class Jobs_cache(Base):
)
job_name = Column(
String(128),
ForeignKey("jobs.name", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_jobs.name", onupdate="cascade", ondelete="cascade"),
nullable=False,
)
service_id = Column(
String(64),
ForeignKey("services.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"),
nullable=True,
)
file_name = Column(
@ -229,7 +228,7 @@ class Jobs_cache(Base):
class Custom_configs(Base):
__tablename__ = "custom_configs"
__tablename__ = "bw_custom_configs"
__table_args__ = (UniqueConstraint("service_id", "type", "name"),)
id = Column(
@ -239,7 +238,7 @@ class Custom_configs(Base):
)
service_id = Column(
String(64),
ForeignKey("services.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"),
nullable=True,
)
type = Column(CUSTOM_CONFIGS_TYPES_ENUM, nullable=False)
@ -252,11 +251,11 @@ class Custom_configs(Base):
class Selects(Base):
__tablename__ = "selects"
__tablename__ = "bw_selects"
setting_id = Column(
String(256),
ForeignKey("settings.id", onupdate="cascade", ondelete="cascade"),
ForeignKey("bw_settings.id", onupdate="cascade", ondelete="cascade"),
primary_key=True,
)
value = Column(String(256), primary_key=True)
@ -265,7 +264,7 @@ class Selects(Base):
class Instances(Base):
__tablename__ = "instances"
__tablename__ = "bw_instances"
hostname = Column(String(256), primary_key=True)
port = Column(Integer, nullable=False)
@ -273,7 +272,7 @@ class Instances(Base):
class Metadata(Base):
__tablename__ = "metadata"
__tablename__ = "bw_metadata"
id = Column(Integer, primary_key=True, default=1)
is_initialized = Column(Boolean, nullable=False)

View File

@ -49,10 +49,15 @@ def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):
):
splitted = var.split("=", 1)
if custom_confs_rx.match(splitted[0]):
custom_conf = custom_confs_rx.search(splitted[0]).groups()
custom_confs.append(
{
"value": splitted[1],
"exploded": custom_confs_rx.search(splitted[0]).groups(),
"exploded": (
custom_conf[0],
custom_conf[1],
custom_conf[2].replace(".conf", ""),
),
}
)
else:
@ -227,11 +232,20 @@ if __name__ == "__main__":
plugins_settings=plugins_settings,
)
config_files = config.get_config()
custom_confs = [
{"value": v, "exploded": custom_confs_rx.search(k).groups()} # type: ignore
for k, v in environ.items()
if custom_confs_rx.match(k)
]
custom_confs = []
for k, v in environ.items():
if custom_confs_rx.match(k):
custom_conf = custom_confs_rx.search(k).groups()
custom_confs.append(
{
"value": v,
"exploded": (
custom_conf[0],
custom_conf[1],
custom_conf[2].replace(".conf", ""),
),
}
)
root_dirs = listdir("/etc/bunkerweb/configs")
for root, dirs, files in walk("/etc/bunkerweb/configs", topdown=True):
if (
@ -276,12 +290,15 @@ if __name__ == "__main__":
for var in instance.attrs["Config"]["Env"]:
splitted = var.split("=", 1)
if custom_confs_rx.match(splitted[0]):
custom_conf = custom_confs_rx.search(splitted[0]).groups()
custom_confs.append(
{
"value": splitted[1],
"exploded": custom_confs_rx.search(
splitted[0]
).groups(),
"exploded": (
custom_conf[0],
custom_conf[1],
custom_conf[2].replace(".conf", ""),
),
}
)
else:

View File

@ -138,14 +138,16 @@ class ApiCaller:
f"Successfully sent API request to {api.get_endpoint()}{url}",
)
if response:
instance = api.get_endpoint().replace("http://", "").split(":")[0]
if isinstance(resp, dict):
responses[instance] = resp
else:
responses[instance] = resp.json()
if response:
instance = (
api.get_endpoint().replace("http://", "").split(":")[0]
)
if isinstance(resp, dict):
responses[instance] = resp
else:
responses[instance] = resp.json()
if response:
if response and responses:
return ret, responses
return ret

View File

@ -56,7 +56,44 @@ def is_cached_file(file: str, expire: str, db=None) -> bool:
if is_cached and cached_file:
Path(file).write_bytes(cached_file.data)
return is_cached
return is_cached and cached_file
def get_file_in_db(job: str, file: str, db) -> bytes:
cached_file = db.get_job_cache_file(job, file)
if not cached_file:
return False
return cached_file.data
def set_file_in_db(job: str, name: str, bio, db) -> Tuple[bool, str]:
ret, err = True, "success"
try:
content = bio.read()
bio.seek(0)
with lock:
err = db.update_job_cache(
basename(getsourcefile(_getframe(1))).replace(".py", ""),
None,
name,
content,
checksum=bytes_hash(bio),
)
if err:
ret = False
except:
return False, f"exception :\n{format_exc()}"
return ret, err
def del_file_in_db(job: str, name: str, db) -> Tuple[bool, str]:
ret, err = True, "success"
try:
db.delete_job_cache(job, name)
except:
return False, f"exception :\n{format_exc()}"
return ret, err
def file_hash(file: str) -> str:
@ -70,6 +107,17 @@ def file_hash(file: str) -> str:
return _sha512.hexdigest()
def bytes_hash(bio: bytes) -> str:
_sha512 = sha512()
while True:
data = bio.read(1024)
if not data:
break
_sha512.update(data)
bio.seek(0)
return _sha512.hexdigest()
def cache_hash(cache: str, db=None) -> Optional[str]:
with suppress(BaseException):
return loads(Path(f"{cache}.md").read_text()).get("checksum", None)

View File

@ -64,7 +64,6 @@ RUN cp /usr/share/bunkerweb/helpers/bwcli /usr/bin/ && \
mkdir -p /var/tmp/bunkerweb/ && \
mkdir -p /var/www/html && \
mkdir -p /var/lib/bunkerweb && \
mkdir -p /etc/letsencrypt && \
#mkdir /var/www/html && \
echo "Linux" > /usr/share/bunkerweb/INTEGRATION && \
#It's a find command that will find all files in the bunkerweb directory, excluding the ui/deps directory, and then chmod them to 0740.

View File

@ -69,8 +69,7 @@ RUN cp /usr/share/bunkerweb/helpers/bwcli /usr/bin/ && \
mkdir -p /var/tmp/bunkerweb/ && \
mkdir -p /var/www/ && \
mkdir -p /var/lib/bunkerweb && \
mkdir -p /etc/letsencrypt && \
#mkdir /var/www/html && \
mkdir /var/www/html && \
echo "Linux" > /usr/share/bunkerweb/INTEGRATION && \
#It's a find command that will find all files in the bunkerweb directory, excluding the ui/deps directory, and then chmod them to 0740.
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type f -exec chmod 0740 {} \; && \

View File

@ -1,4 +1,4 @@
FROM fedora:37
FROM fedora:38
ENV OS=fedora
ENV NGINX_VERSION 1.24.0
@ -63,7 +63,6 @@ RUN cp /usr/share/bunkerweb/helpers/bwcli /usr/bin/ && \
mkdir -p /var/tmp/bunkerweb/ && \
mkdir -p /var/www/html && \
mkdir -p /var/lib/bunkerweb && \
mkdir -p /etc/letsencrypt && \
echo "Linux" > /usr/share/bunkerweb/INTEGRATION && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type f -exec chmod 0740 {} \; && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type d -exec chmod 0750 {} \; && \

View File

@ -5,6 +5,7 @@ ENV NGINX_VERSION 1.24.0
# Copy centos repo
COPY src/linux/centos.repo /etc/yum.repos.d/centos.repo
RUN sed -i "s/%ARCH%/$(uname -m)/g" src/linux/centos.repo /etc/yum.repos.d/centos.repo
# Copy RPM-GPG-KEY-CentOS-Official
COPY src/linux/RPM-GPG-KEY-centosofficial /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
@ -76,7 +77,6 @@ RUN cp /usr/share/bunkerweb/helpers/bwcli /usr/bin/ && \
mkdir -p /var/tmp/bunkerweb/ && \
mkdir -p /var/www/html && \
mkdir -p /var/lib/bunkerweb && \
mkdir -p /etc/letsencrypt && \
echo "Linux" > /usr/share/bunkerweb/INTEGRATION && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type f -exec chmod 0740 {} \; && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type d -exec chmod 0750 {} \; && \

View File

@ -66,7 +66,6 @@ RUN cp /usr/share/bunkerweb/helpers/bwcli /usr/bin/ && \
mkdir -p /var/tmp/bunkerweb/ && \
mkdir -p /var/www/html && \
mkdir -p /var/lib/bunkerweb && \
mkdir -p /etc/letsencrypt && \
echo "Linux" > /usr/share/bunkerweb/INTEGRATION && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type f -exec chmod 0740 {} \; && \
find /usr/share/bunkerweb -path /usr/share/bunkerweb/ui/deps -prune -o -type d -exec chmod 0750 {} \; && \

View File

@ -1,6 +1,6 @@
[centos8-base]
name = CentOS 8 Base OS
baseurl = http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/
baseurl = http://mirror.centos.org/centos/8-stream/BaseOS/%ARCH%/os/
gpgcheck = 1
enabled = 1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

View File

@ -10,4 +10,4 @@
--before-install /usr/share/bunkerweb/scripts/beforeInstall.sh
--after-install /usr/share/bunkerweb/scripts/postinstall.sh
--after-remove /usr/share/bunkerweb/scripts/afterRemoveRPM.sh
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb /etc/letsencrypt=/etc/letsencrypt
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb

View File

@ -10,4 +10,4 @@
--before-install /usr/share/bunkerweb/scripts/beforeInstall.sh
--after-install /usr/share/bunkerweb/scripts/postinstall.sh
--after-remove /usr/share/bunkerweb/scripts/afterRemoveDEB.sh
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb /etc/letsencrypt=/etc/letsencrypt
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb

View File

@ -4,10 +4,10 @@
--version %VERSION%
--architecture x86_64
--depends bash --depends python3 --depends 'nginx = 1:1.24.0-1.fc37' --depends libcurl-devel --depends libxml2 --depends lmdb-libs --depends geoip-devel --depends gd --depends sudo --depends procps --depends lsof --depends nginx-mod-stream
--description "BunkerWeb %VERSION% for Fedora 37"
--description "BunkerWeb %VERSION% for Fedora 38"
--url "https://www.bunkerweb.io"
--maintainer "Bunkerity <contact at bunkerity dot com>"
--before-install /usr/share/bunkerweb/scripts/beforeInstall.sh
--after-install /usr/share/bunkerweb/scripts/postinstall.sh
--after-remove /usr/share/bunkerweb/scripts/afterRemoveRPM.sh
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb /etc/letsencrypt=/etc/letsencrypt
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb

View File

@ -10,4 +10,4 @@
--before-install /usr/share/bunkerweb/scripts/beforeInstall.sh
--after-install /usr/share/bunkerweb/scripts/postinstall.sh
--after-remove /usr/share/bunkerweb/scripts/afterRemoveRPM.sh
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb /etc/letsencrypt=/etc/letsencrypt
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb

View File

@ -10,4 +10,4 @@
--after-install /usr/share/bunkerweb/scripts/postinstall.sh
--after-remove /usr/share/bunkerweb/scripts/afterRemoveDEB.sh
--deb-no-default-config-files
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb /etc/letsencrypt=/etc/letsencrypt
/usr/share/bunkerweb/=/usr/share/bunkerweb/ /usr/bin/bwcli=/usr/bin/bwcli /etc/bunkerweb/=/etc/bunkerweb /var/tmp/bunkerweb/=/var/tmp/bunkerweb /var/cache/bunkerweb/=/var/cache/bunkerweb /lib/systemd/system/bunkerweb.service=/lib/systemd/system/bunkerweb.service /lib/systemd/system/bunkerweb-ui.service=/lib/systemd/system/bunkerweb-ui.service /var/lib/bunkerweb=/var/lib/bunkerweb

View File

@ -30,7 +30,7 @@ fi
do_and_check_cmd mkdir "$package_dir"
# Generate package
version="$(cat VERSION | tr -d '\n')"
version="$(cat src/VERSION | tr -d '\n')"
type="deb"
if [ "$linux" = "fedora" ] || [ "$linux" = "centos" ] || [ "$linux" = "rhel" ] ; then
type="rpm"

View File

@ -1,124 +0,0 @@
#!/bin/bash
function do_and_check_cmd() {
if [ "$CHANGE_DIR" != "" ] ; then
cd "$CHANGE_DIR"
fi
output=$("$@" 2>&1)
ret="$?"
if [ $ret -ne 0 ] ; then
echo "❌ Error from command : $*"
echo "$output"
exit $ret
fi
#echo $output
return 0
}
# Check if we are root
if [ $(id -u) -ne 0 ] ; then
echo "❌ Run me as root"
exit 1
fi
# Detect OS
OS=""
if [ "$(grep Debian /etc/os-release)" != "" ] ; then
OS="debian"
elif [ "$(grep Ubuntu /etc/os-release)" != "" ] ; then
OS="ubuntu"
elif [ "$(grep CentOS /etc/os-release)" != "" ] ; then
OS="centos"
elif [ "$(grep Fedora /etc/os-release)" != "" ] ; then
OS="fedora"
fi
if [ "$OS" = "" ] ; then
echo "❌ Unsupported Operating System"
exit 1
fi
# Stop nginx
systemctl status nginx > /dev/null 2>&1
if [ $? -eq 0 ] ; then
echo " Stop nginx service"
do_and_check_cmd systemctl stop nginx
fi
# Reload old nginx.service file
# echo " Restore old nginx service"
# do_and_check_cmd mv /lib/systemd/system/nginx.service.bak /lib/systemd/system/nginx.service
# do_and_check_cmd systemctl daemon-reload
# echo " Remove bunkerweb-ui service"
if [ -f "/lib/systemd/system/bunkerweb-ui.service" ] ; then
echo " Remove bunkerweb-ui service"
do_and_check_cmd systemctl stop bunkerweb-ui
do_and_check_cmd systemctl disable bunkerweb-ui
do_and_check_cmd rm -f /lib/systemd/system/bunkerweb-ui.service
do_and_check_cmd systemctl daemon-reload
do_and_check_cmd systemctl reset-failed
fi
# do_and_check_cmd systemctl disable bunkerweb-ui
# do_and_check_cmd rm -f /etc/systemd/system/bunkerweb-ui.service
# do_and_check_cmd systemctl daemon-reload
# do_and_check_cmd systemctl reset-failed
# do_and_check_cmd sed -i "s@nginx ALL=(root:root) NOPASSWD: /usr/share/bunkerweb/ui/linux.sh@@" /etc/sudoers
# Remove /usr/share/bunkerweb
if [ -e "/usr/share/bunkerweb" ] ; then
echo " Remove /usr/share/bunkerweb"
do_and_check_cmd rm -rf /usr/share/bunkerweb
fi
# Remove /etc/bunkerweb
if [ -e "/etc/bunkerweb" ] ; then
echo " Remove /etc/bunkerweb"
do_and_check_cmd rm -rf /etc/bunkerweb
fi
# # Remove /var/tmp/bunkerweb
# if [ -e "/var/tmp/bunkerweb" ] ; then
# echo " Remove /var/tmp/bunkerweb"
# do_and_check_cmd rm -rf /var/tmp/bunkerweb
# fi
# Remove /var/lib/bunkerweb
if [ -e "/var/lib/bunkerweb" ] ; then
echo " Remove /var/lib/bunkerweb"
do_and_check_cmd rm -rf /var/lib/bunkerweb
fi
# Remove /usr/bin/bwcli
if [ -f "/usr/bin/bwcli" ] ; then
echo " Remove /usr/bin/bwcli"
do_and_check_cmd rm -f /usr/bin/bwcli
fi
# Remove systemd service
if [ -f "/lib/systemd/system/bunkerweb.service" ] ; then
echo " Remove bunkerweb service"
do_and_check_cmd systemctl stop bunkerweb
do_and_check_cmd systemctl disable bunkerweb
do_and_check_cmd rm -f /lib/systemd/system/bunkerweb.service
do_and_check_cmd systemctl daemon-reload
do_and_check_cmd systemctl reset-failed
fi
# Uninstall nginx
# if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
# echo " Uninstall nginx"
# do_and_check_cmd systemctl stop nginx
# do_and_check_cmd apt remove nginx -y
# echo " If you want to reinstall nginx, run the following command:"
# echo "apt-get install nginx"
# elif [ "$OS" = "centos" ] || [ "$OS" = "fedora" ] ; then
# echo " Uninstall nginx"
# do_and_check_cmd systemctl stop nginx
# do_and_check_cmd yum remove nginx -y
# echo " If you want to reinstall nginx, run the following command:"
# echo "apt-get install nginx"
# fi
# We're done
echo " BunkerWeb successfully uninstalled"

View File

@ -95,22 +95,4 @@ else
echo "/var/www/html directory already exists, skipping copy..."
fi
# Create letsencrypt folders if needed
if [ ! -d /etc/letsencrypt ] ; then
mkdir /etc/letsencrypt
fi
chown nginx:nginx /etc/letsencrypt
chmod 770 /etc/letsencrypt
if [ ! -d /var/lib/letsencrypt ] ; then
mkdir /var/lib/letsencrypt
fi
chown nginx:nginx /var/lib/letsencrypt
chmod 770 /var/lib/letsencrypt
if [ ! -d /var/log/letsencrypt ] ; then
mkdir /var/log/letsencrypt
fi
chown nginx:nginx /var/log/letsencrypt
chmod 770 /var/log/letsencrypt
echo "Postinstall successful !"
echo "Postinstall successful !"

View File

@ -10,7 +10,7 @@ RUN mkdir -p /usr/share/bunkerweb/deps && \
rm -rf /tmp/req
# Install python requirements
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev && \
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev openssl-dev cargo postgresql-dev && \
pip install --no-cache-dir --upgrade pip && \
pip install wheel && \
mkdir -p /usr/share/bunkerweb/deps/python && \
@ -44,7 +44,6 @@ RUN apk add --no-cache bash libgcc libstdc++ openssl && \
mkdir -p /etc/bunkerweb && \
mkdir -p /data/cache && ln -s /data/cache /var/cache/bunkerweb && \
mkdir -p /data/lib && ln -s /data/lib /var/lib/bunkerweb && \
mkdir -p /data/cache/letsencrypt && ln -s /data/cache/letsencrypt /etc/letsencrypt && \
mkdir -p /data/www && ln -s /data/www /var/www/html && \
for dir in $(echo "configs plugins") ; do mkdir -p "/data/${dir}" && ln -s "/data/${dir}" "/etc/bunkerweb/${dir}" ; done && \
for dir in $(echo "configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs") ; do mkdir "/data/${dir}" ; done && \

View File

@ -105,10 +105,12 @@ class JobScheduler(ApiCaller):
f"Executing job {name} from plugin {plugin} ...",
)
success = True
ret = -1
try:
proc = run(
f"{path}jobs/{file}", stdin=DEVNULL, stderr=STDOUT, env=self.__env
)
ret = proc.returncode
except BaseException:
success = False
self.__logger.error(
@ -136,6 +138,7 @@ class JobScheduler(ApiCaller):
self.__logger.warning(
f"Failed to update database for the job {name} from plugin {plugin}: {err}",
)
return ret
def setup(self):
for plugin, jobs in self.__jobs.items():
@ -164,7 +167,7 @@ class JobScheduler(ApiCaller):
ret = job.run()
if ret == 1:
reload = True
elif (ret or 2) >= 2:
elif ret < 0 or ret >= 2:
success = False
if reload:
try:

View File

@ -10,7 +10,7 @@ RUN mkdir -p /usr/share/bunkerweb/deps && \
rm -rf /tmp/req
# Install python requirements
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev && \
RUN apk add --no-cache --virtual .build-deps g++ gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev openssl-dev cargo file make postgresql-dev && \
pip install --no-cache-dir --upgrade pip && \
pip install wheel && \
mkdir -p /usr/share/bunkerweb/deps/python && \

View File

@ -54,7 +54,8 @@ from kubernetes import client as kube_client
from kubernetes import config as kube_config
from kubernetes.client.exceptions import ApiException as kube_ApiException
from os import _exit, getenv, getpid, listdir
from re import match as re_match
from re import compile as re_compile
from regex import match as regex_match
from requests import get
from shutil import move, rmtree
from signal import SIGINT, signal, SIGTERM
@ -135,8 +136,13 @@ elif "ADMIN_PASSWORD" not in vars:
logger.error("ADMIN_PASSWORD is not set")
stop(1)
if not vars.get("FLASK_DEBUG", False) and vars["ADMIN_PASSWORD"] == "changeme":
logger.error("Please change the default admin password.")
if not vars.get("FLASK_DEBUG", False) and not regex_match(
r"^(?=.*?\p{Lowercase_Letter})(?=.*?\p{Uppercase_Letter})(?=.*?\d)(?=.*?[ !\"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]).{8,}$",
vars["ADMIN_PASSWORD"],
):
logger.error(
"The admin password is not strong enough. It must contain at least 8 characters, including at least 1 uppercase letter, 1 lowercase letter, 1 number and 1 special character (#@?!$%^&*-)."
)
stop(1)
if not vars["ABSOLUTE_URI"].endswith("/"):
@ -231,6 +237,8 @@ except FileNotFoundError as e:
logger.error(repr(e), e.filename)
stop(1)
plugin_id_rx = re_compile(r"^[\w_-]{1,64}$")
# Declare functions for jinja2
app.jinja_env.globals.update(check_settings=check_settings)
@ -1222,7 +1230,7 @@ def upload_plugin():
@app.route("/plugins/<plugin>", methods=["GET", "POST"])
@login_required
def custom_plugin(plugin):
if not re_match(r"^[a-zA-Z0-9_-]{1,64}$", plugin):
if not plugin_id_rx.match(plugin):
flash(
f"Invalid plugin id, <b>{plugin}</b> (must be between 1 and 64 characters, only letters, numbers, underscores and hyphens)",
"error",

View File

@ -6,3 +6,4 @@ python_dateutil==2.8.2
bcrypt==4.0.1
gunicorn==20.1.0
gevent==22.10.2
regex==2023.5.5

View File

@ -1,5 +1,5 @@
#
# This file is autogenerated by pip-compile with Python 3.9
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile --allow-unsafe --generate-hashes --resolver=backtracking
@ -174,10 +174,6 @@ gunicorn==20.1.0 \
--hash=sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e \
--hash=sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8
# via -r requirements.in
importlib-metadata==6.6.0 \
--hash=sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed \
--hash=sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705
# via flask
itsdangerous==2.1.2 \
--hash=sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44 \
--hash=sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a
@ -247,6 +243,96 @@ python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
--hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
# via -r requirements.in
regex==2023.5.5 \
--hash=sha256:02f4541550459c08fdd6f97aa4e24c6f1932eec780d58a2faa2068253df7d6ff \
--hash=sha256:0a69cf0c00c4d4a929c6c7717fd918414cab0d6132a49a6d8fc3ded1988ed2ea \
--hash=sha256:0bbd5dcb19603ab8d2781fac60114fb89aee8494f4505ae7ad141a3314abb1f9 \
--hash=sha256:10250a093741ec7bf74bcd2039e697f519b028518f605ff2aa7ac1e9c9f97423 \
--hash=sha256:10374c84ee58c44575b667310d5bbfa89fb2e64e52349720a0182c0017512f6c \
--hash=sha256:1189fbbb21e2c117fda5303653b61905aeeeea23de4a94d400b0487eb16d2d60 \
--hash=sha256:1307aa4daa1cbb23823d8238e1f61292fd07e4e5d8d38a6efff00b67a7cdb764 \
--hash=sha256:144b5b017646b5a9392a5554a1e5db0000ae637be4971c9747566775fc96e1b2 \
--hash=sha256:171c52e320fe29260da550d81c6b99f6f8402450dc7777ef5ced2e848f3b6f8f \
--hash=sha256:18196c16a584619c7c1d843497c069955d7629ad4a3fdee240eb347f4a2c9dbe \
--hash=sha256:18f05d14f14a812fe9723f13afafefe6b74ca042d99f8884e62dbd34dcccf3e2 \
--hash=sha256:1ecf3dcff71f0c0fe3e555201cbe749fa66aae8d18f80d2cc4de8e66df37390a \
--hash=sha256:21e90a288e6ba4bf44c25c6a946cb9b0f00b73044d74308b5e0afd190338297c \
--hash=sha256:23d86ad2121b3c4fc78c58f95e19173790e22ac05996df69b84e12da5816cb17 \
--hash=sha256:256f7f4c6ba145f62f7a441a003c94b8b1af78cee2cccacfc1e835f93bc09426 \
--hash=sha256:290fd35219486dfbc00b0de72f455ecdd63e59b528991a6aec9fdfc0ce85672e \
--hash=sha256:2e9c4f778514a560a9c9aa8e5538bee759b55f6c1dcd35613ad72523fd9175b8 \
--hash=sha256:338994d3d4ca4cf12f09822e025731a5bdd3a37aaa571fa52659e85ca793fb67 \
--hash=sha256:33d430a23b661629661f1fe8395be2004006bc792bb9fc7c53911d661b69dd7e \
--hash=sha256:385992d5ecf1a93cb85adff2f73e0402dd9ac29b71b7006d342cc920816e6f32 \
--hash=sha256:3d45864693351c15531f7e76f545ec35000d50848daa833cead96edae1665559 \
--hash=sha256:40005cbd383438aecf715a7b47fe1e3dcbc889a36461ed416bdec07e0ef1db66 \
--hash=sha256:4035d6945cb961c90c3e1c1ca2feb526175bcfed44dfb1cc77db4fdced060d3e \
--hash=sha256:445d6f4fc3bd9fc2bf0416164454f90acab8858cd5a041403d7a11e3356980e8 \
--hash=sha256:48c9ec56579d4ba1c88f42302194b8ae2350265cb60c64b7b9a88dcb7fbde309 \
--hash=sha256:4a5059bd585e9e9504ef9c07e4bc15b0a621ba20504388875d66b8b30a5c4d18 \
--hash=sha256:4a6e4b0e0531223f53bad07ddf733af490ba2b8367f62342b92b39b29f72735a \
--hash=sha256:4b870b6f632fc74941cadc2a0f3064ed8409e6f8ee226cdfd2a85ae50473aa94 \
--hash=sha256:50fd2d9b36938d4dcecbd684777dd12a407add4f9f934f235c66372e630772b0 \
--hash=sha256:53e22e4460f0245b468ee645156a4f84d0fc35a12d9ba79bd7d79bdcd2f9629d \
--hash=sha256:586a011f77f8a2da4b888774174cd266e69e917a67ba072c7fc0e91878178a80 \
--hash=sha256:59597cd6315d3439ed4b074febe84a439c33928dd34396941b4d377692eca810 \
--hash=sha256:59e4b729eae1a0919f9e4c0fc635fbcc9db59c74ad98d684f4877be3d2607dd6 \
--hash=sha256:5a0f874ee8c0bc820e649c900243c6d1e6dc435b81da1492046716f14f1a2a96 \
--hash=sha256:5ac2b7d341dc1bd102be849d6dd33b09701223a851105b2754339e390be0627a \
--hash=sha256:5e3f4468b8c6fd2fd33c218bbd0a1559e6a6fcf185af8bb0cc43f3b5bfb7d636 \
--hash=sha256:6164d4e2a82f9ebd7752a06bd6c504791bedc6418c0196cd0a23afb7f3e12b2d \
--hash=sha256:6893544e06bae009916a5658ce7207e26ed17385149f35a3125f5259951f1bbe \
--hash=sha256:690a17db524ee6ac4a27efc5406530dd90e7a7a69d8360235323d0e5dafb8f5b \
--hash=sha256:6b8d0c153f07a953636b9cdb3011b733cadd4178123ef728ccc4d5969e67f3c2 \
--hash=sha256:72a28979cc667e5f82ef433db009184e7ac277844eea0f7f4d254b789517941d \
--hash=sha256:72aa4746993a28c841e05889f3f1b1e5d14df8d3daa157d6001a34c98102b393 \
--hash=sha256:732176f5427e72fa2325b05c58ad0b45af341c459910d766f814b0584ac1f9ac \
--hash=sha256:7918a1b83dd70dc04ab5ed24c78ae833ae8ea228cef84e08597c408286edc926 \
--hash=sha256:7923470d6056a9590247ff729c05e8e0f06bbd4efa6569c916943cb2d9b68b91 \
--hash=sha256:7d76a8a1fc9da08296462a18f16620ba73bcbf5909e42383b253ef34d9d5141e \
--hash=sha256:811040d7f3dd9c55eb0d8b00b5dcb7fd9ae1761c454f444fd9f37fe5ec57143a \
--hash=sha256:821a88b878b6589c5068f4cc2cfeb2c64e343a196bc9d7ac68ea8c2a776acd46 \
--hash=sha256:84397d3f750d153ebd7f958efaa92b45fea170200e2df5e0e1fd4d85b7e3f58a \
--hash=sha256:844671c9c1150fcdac46d43198364034b961bd520f2c4fdaabfc7c7d7138a2dd \
--hash=sha256:890a09cb0a62198bff92eda98b2b507305dd3abf974778bae3287f98b48907d3 \
--hash=sha256:8f08276466fedb9e36e5193a96cb944928301152879ec20c2d723d1031cd4ddd \
--hash=sha256:8f5e06df94fff8c4c85f98c6487f6636848e1dc85ce17ab7d1931df4a081f657 \
--hash=sha256:921473a93bcea4d00295799ab929522fc650e85c6b9f27ae1e6bb32a790ea7d3 \
--hash=sha256:941b3f1b2392f0bcd6abf1bc7a322787d6db4e7457be6d1ffd3a693426a755f2 \
--hash=sha256:9b320677521aabf666cdd6e99baee4fb5ac3996349c3b7f8e7c4eee1c00dfe3a \
--hash=sha256:9c3efee9bb53cbe7b285760c81f28ac80dc15fa48b5fe7e58b52752e642553f1 \
--hash=sha256:9fda3e50abad8d0f48df621cf75adc73c63f7243cbe0e3b2171392b445401550 \
--hash=sha256:a4c5da39bca4f7979eefcbb36efea04471cd68db2d38fcbb4ee2c6d440699833 \
--hash=sha256:a56c18f21ac98209da9c54ae3ebb3b6f6e772038681d6cb43b8d53da3b09ee81 \
--hash=sha256:a623564d810e7a953ff1357f7799c14bc9beeab699aacc8b7ab7822da1e952b8 \
--hash=sha256:a8906669b03c63266b6a7693d1f487b02647beb12adea20f8840c1a087e2dfb5 \
--hash=sha256:a99757ad7fe5c8a2bb44829fc57ced11253e10f462233c1255fe03888e06bc19 \
--hash=sha256:aa7d032c1d84726aa9edeb6accf079b4caa87151ca9fabacef31fa028186c66d \
--hash=sha256:aad5524c2aedaf9aa14ef1bc9327f8abd915699dea457d339bebbe2f0d218f86 \
--hash=sha256:afb1c70ec1e594a547f38ad6bf5e3d60304ce7539e677c1429eebab115bce56e \
--hash=sha256:b6365703e8cf1644b82104cdd05270d1a9f043119a168d66c55684b1b557d008 \
--hash=sha256:b8b942d8b3ce765dbc3b1dad0a944712a89b5de290ce8f72681e22b3c55f3cc8 \
--hash=sha256:ba73a14e9c8f9ac409863543cde3290dba39098fc261f717dc337ea72d3ebad2 \
--hash=sha256:bd7b68fd2e79d59d86dcbc1ccd6e2ca09c505343445daaa4e07f43c8a9cc34da \
--hash=sha256:bd966475e963122ee0a7118ec9024388c602d12ac72860f6eea119a3928be053 \
--hash=sha256:c2ce65bdeaf0a386bb3b533a28de3994e8e13b464ac15e1e67e4603dd88787fa \
--hash=sha256:c64d5abe91a3dfe5ff250c6bb267ef00dbc01501518225b45a5f9def458f31fb \
--hash=sha256:c8c143a65ce3ca42e54d8e6fcaf465b6b672ed1c6c90022794a802fb93105d22 \
--hash=sha256:cd46f30e758629c3ee91713529cfbe107ac50d27110fdcc326a42ce2acf4dafc \
--hash=sha256:ced02e3bd55e16e89c08bbc8128cff0884d96e7f7a5633d3dc366b6d95fcd1d6 \
--hash=sha256:cf123225945aa58b3057d0fba67e8061c62d14cc8a4202630f8057df70189051 \
--hash=sha256:d19e57f888b00cd04fc38f5e18d0efbd91ccba2d45039453ab2236e6eec48d4d \
--hash=sha256:d1cbe6b5be3b9b698d8cc4ee4dee7e017ad655e83361cd0ea8e653d65e469468 \
--hash=sha256:db09e6c18977a33fea26fe67b7a842f706c67cf8bda1450974d0ae0dd63570df \
--hash=sha256:de2f780c3242ea114dd01f84848655356af4dd561501896c751d7b885ea6d3a1 \
--hash=sha256:e2205a81f815b5bb17e46e74cc946c575b484e5f0acfcb805fb252d67e22938d \
--hash=sha256:e645c757183ee0e13f0bbe56508598e2d9cd42b8abc6c0599d53b0d0b8dd1479 \
--hash=sha256:f2910502f718828cecc8beff004917dcf577fc5f8f5dd40ffb1ea7612124547b \
--hash=sha256:f764e4dfafa288e2eba21231f455d209f4709436baeebb05bdecfb5d8ddc3d35 \
--hash=sha256:f83fe9e10f9d0b6cf580564d4d23845b9d692e4c91bd8be57733958e4c602956 \
--hash=sha256:fb2b495dd94b02de8215625948132cc2ea360ae84fe6634cd19b6567709c8ae2 \
--hash=sha256:fee0016cc35a8a91e8cc9312ab26a6fe638d484131a7afa79e1ce6165328a135
# via -r requirements.in
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
--hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
@ -255,9 +341,9 @@ soupsieve==2.4.1 \
--hash=sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8 \
--hash=sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea
# via beautifulsoup4
werkzeug==2.3.2 \
--hash=sha256:2f3278e9ef61511cdf82cc28fc5da0f5b501dd8f01ecf5ef6a5d810048f68702 \
--hash=sha256:b7b8bc1609f35ae8e45d48a9b58d7a4eb1e41eec148d37e977e5df6ebf3398b2
werkzeug==2.3.3 \
--hash=sha256:4866679a0722de00796a74086238bb3b98d90f423f05de039abb09315487254a \
--hash=sha256:a987caf1092edc7523edb139edb20c70571c4a8d5eed02e0b547b4739174d091
# via
# flask
# flask-login
@ -265,10 +351,6 @@ wtforms==3.0.1 \
--hash=sha256:6b351bbb12dd58af57ffef05bc78425d08d1914e0fd68ee14143b7ade023c5bc \
--hash=sha256:837f2f0e0ca79481b92884962b914eba4e72b7a2daaf1f939c890ed0124b834b
# via flask-wtf
zipp==3.15.0 \
--hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
--hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
# via importlib-metadata
zope-event==4.6 \
--hash=sha256:73d9e3ef750cca14816a9c322c7250b0d7c9dbc337df5d1b807ff8d3d0b9e97c \
--hash=sha256:81d98813046fc86cc4136e3698fee628a3282f9c320db18658c21749235fce80

View File

@ -21,6 +21,13 @@
state: forcereinstall
executable: pip3
- name: Pin version for urllib
pip:
name: urllib3<2
state: forcereinstall
executable: pip3
extra_args:
- name: Init Docker Swarm
community.general.docker_swarm:
advertise_addr: "{{ local_ip }}"

View File

@ -1,4 +1,4 @@
FROM fedora:37
FROM fedora:38
ENV container docker
ENV NGINX_VERSION 1.24.0

View File

@ -4,6 +4,7 @@ ENV NGINX_VERSION 1.24.0
# Copy centos repo
COPY src/linux/centos.repo /etc/yum.repos.d/centos.repo
RUN sed -i "s/%ARCH%/$(uname -m)/g" src/linux/centos.repo /etc/yum.repos.d/centos.repo
# Copy RPM-GPG-KEY-CentOS-Official
COPY src/linux/RPM-GPG-KEY-centosofficial /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

View File

@ -13,6 +13,14 @@ RUN GECKODRIVER_VERSION=`curl -i https://github.com/mozilla/geckodriver/releases
WORKDIR /opt/tests_ui
COPY requirements.txt .
RUN pip install --no-cache -r requirements.txt
RUN touch test.txt && \
zip -r test.zip test.txt && \
rm test.txt
RUN echo '{ \
"id": "discord", \
"order": 999, \
@ -29,13 +37,8 @@ RUN echo '{ \
RUN apk del .build-deps && \
rm -rf /var/cache/apk/*
COPY main.py .
ENV PYTHONUNBUFFERED=1
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY main.py .
COPY test.zip .
CMD ["python3", "main.py"]

View File

@ -55,7 +55,7 @@ services:
environment:
ABSOLUTE_URI: "http://www.example.com/admin/"
ADMIN_USERNAME: "admin"
ADMIN_PASSWORD: "admin"
ADMIN_PASSWORD: "S$$cr3tP@ssw0rd"
DOCKER_HOST: "tcp://bw-docker-proxy:2375"
volumes:
- bw-data:/data
@ -81,6 +81,7 @@ services:
volumes:
bw-data:
networks:
bw-universe:
name: bw-universe

View File

@ -245,7 +245,7 @@ try:
username_input = safe_get_element(driver, By.ID, "username")
password_input = safe_get_element(driver, By.ID, "password")
username_input.send_keys("admin")
password_input.send_keys("admin")
password_input.send_keys("S$cr3tP@ssw0rd")
access_page(
driver,
@ -909,7 +909,7 @@ try:
False,
)
assert_alert_message(driver, "is not a valid plugin")
sleep(2)
print(
"The bad plugin has been rejected, trying to add a good plugin ...",
@ -928,8 +928,6 @@ try:
False,
)
assert_alert_message(driver, "Successfully created plugin")
external_plugins = safe_get_element(
driver,
By.XPATH,

Binary file not shown.

View File

@ -44,12 +44,12 @@ if [ $i -ge 120 ] ; then
fi
# Start tests
docker-compose -f docker-compose.tests.yml build
docker-compose -f docker-compose.test.yml build
if [ $? -ne 0 ] ; then
echo "❌ Build failed"
exit 1
fi
docker-compose -f docker-compose.tests.yml up --abort-on-container-exit --exit-code-from ui-tests
docker-compose -f docker-compose.test.yml up --abort-on-container-exit --exit-code-from ui-tests
# Exit
exit $?