Merge pull request #341 from TheophileDiot/1.5
Update the Database and make it easier to gen
This commit is contained in: commit 0e6a5f3f96
@@ -12,12 +12,18 @@ class Config(ConfigCaller):
        self.__ctrl_type = ctrl_type
        self.__lock = lock
        self.__logger = setup_logger("Config", getenv("LOG_LEVEL", "INFO"))
        self._db = None
        self.__instances = []
        self.__services = []
        self.__configs = []
        self.__config = {}

        self._db = Database(self.__logger)
        while not self._db.is_initialized():
            self.__logger.warning(
                "Database is not initialized, retrying in 5 seconds ...",
            )
            sleep(5)

    def __get_full_env(self) -> dict:
        env_instances = {}
        for instance in self.__instances:
@@ -55,21 +61,6 @@ class Config(ConfigCaller):
        self.__configs = configs
        self.__config = self.__get_full_env()

        if self._db is None:
            self._db = Database(
                self.__logger,
                sqlalchemy_string=self.__config.get("DATABASE_URI", None),
                bw_integration="Kubernetes"
                if self.__config.get("KUBERNETES_MODE", "no") == "yes"
                else "Cluster",
            )

            while not self._db.is_initialized():
                self.__logger.warning(
                    "Database is not initialized, retrying in 5 seconds ...",
                )
                sleep(5)

        custom_configs = []
        for config_type in self.__configs:
            for file, data in self.__configs[config_type].items():
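The two hunks above replace lazy, per-call database construction with an eager one in __init__ that blocks until the schema exists. A minimal sketch of that retry-until-ready pattern, assuming only the is_initialized() predicate that Database exposes here:

from time import sleep


def wait_until_initialized(db, interval: int = 5, log=print) -> None:
    # Block until the database reports that its tables exist.
    while not db.is_initialized():
        log(f"Database is not initialized, retrying in {interval} seconds ...")
        sleep(interval)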
@@ -2,7 +2,6 @@

from os import _exit, getenv
from signal import SIGINT, SIGTERM, signal
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
@ -54,9 +54,6 @@ logger = setup_logger("BLACKLIST", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -19,9 +19,6 @@ logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -19,9 +19,6 @@ logger = setup_logger("BUNKERNET", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -17,9 +17,6 @@ logger = setup_logger("CUSTOM-CERT", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -54,9 +54,6 @@ logger = setup_logger("GREYLIST", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -21,9 +21,6 @@ logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -21,9 +21,6 @@ logger = setup_logger("JOBS", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -14,7 +14,6 @@ sys_path.append("/opt/bunkerweb/utils")
|
|||
sys_path.append("/opt/bunkerweb/api")
|
||||
|
||||
from docker import DockerClient
|
||||
from docker.errors import DockerException
|
||||
|
||||
from logger import setup_logger
|
||||
from API import API
|
||||
|
@ -88,12 +87,9 @@ try:
|
|||
|
||||
# Docker or Linux case
|
||||
elif bw_integration == "Docker":
|
||||
try:
|
||||
docker_client = DockerClient(base_url="tcp://docker-proxy:2375")
|
||||
except DockerException:
|
||||
docker_client = DockerClient(
|
||||
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
|
||||
)
|
||||
docker_client = DockerClient(
|
||||
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
|
||||
)
|
||||
|
||||
apis = []
|
||||
for instance in docker_client.containers.list(
|
||||
|
|
|
@ -36,9 +36,6 @@ logger = setup_logger("REALIP", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@ -19,9 +19,6 @@ logger = setup_logger("self-signed", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -54,9 +54,6 @@ logger = setup_logger("WHITELIST", getenv("LOG_LEVEL", "INFO"))
|
|||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
)
|
||||
status = 0
|
||||
|
||||
|
|
|
@@ -13,14 +13,19 @@ class Configurator:
    def __init__(
        self,
        settings: str,
        core: str,
        core: Union[str, dict],
        plugins: str,
        variables: Union[str, dict],
        logger: Logger,
    ):
        self.__logger = logger
        self.__settings = self.__load_settings(settings)
        self.__core = core

        if isinstance(core, str):
            self.__core = self.__load_plugins(core)
        else:
            self.__core = core

        self.__plugins_settings = []
        self.__plugins = self.__load_plugins(plugins, "plugins")
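Configurator now accepts the core plugins either as a directory path or as an already-computed settings dict, which is what lets gen/main.py and the new save_config.py share one class. A sketch of the dispatch, with load_plugins standing in for the private __load_plugins method:

from typing import Union


def resolve_core(core: Union[str, dict], load_plugins) -> dict:
    # A string means "load plugin settings from this directory"; a dict is
    # passed through untouched.
    return load_plugins(core) if isinstance(core, str) else core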
bw/gen/main.py (481 changed lines)
@@ -2,17 +2,14 @@

from argparse import ArgumentParser
from glob import glob
from itertools import chain
from json import loads
from os import R_OK, W_OK, X_OK, access, environ, getenv, path, remove, unlink
from os import R_OK, W_OK, X_OK, access, getenv, path, remove, unlink
from os.path import exists, isdir, isfile, islink
from re import compile as re_compile
from shutil import rmtree
from subprocess import DEVNULL, STDOUT, run
from sys import exit as sys_exit, path as sys_path
from time import sleep
from traceback import format_exc
from typing import Any


sys_path.append("/opt/bunkerweb/deps/python")
@ -20,18 +17,14 @@ sys_path.append("/opt/bunkerweb/utils")
|
|||
sys_path.append("/opt/bunkerweb/api")
|
||||
sys_path.append("/opt/bunkerweb/db")
|
||||
|
||||
from docker import DockerClient
|
||||
from kubernetes import client as kube_client
|
||||
|
||||
from logger import setup_logger
|
||||
from Database import Database
|
||||
from Configurator import Configurator
|
||||
from Templator import Templator
|
||||
from API import API
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logger = setup_logger("Generator", environ.get("LOG_LEVEL", "INFO"))
|
||||
logger = setup_logger("Generator", getenv("LOG_LEVEL", "INFO"))
|
||||
wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
|
||||
|
||||
try:
|
||||
|
@ -78,17 +71,6 @@ if __name__ == "__main__":
|
|||
type=str,
|
||||
help="path to the file containing environment variables",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--method",
|
||||
default="scheduler",
|
||||
type=str,
|
||||
help="The method that is used in the database",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--init",
|
||||
action="store_true",
|
||||
help="Only initialize the database",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
logger.info("Generator started ...")
|
||||
|
@ -99,439 +81,60 @@ if __name__ == "__main__":
|
|||
logger.info(f"Output : {args.output}")
|
||||
logger.info(f"Target : {args.target}")
|
||||
logger.info(f"Variables : {args.variables}")
|
||||
logger.info(f"Method : {args.method}")
|
||||
logger.info(f"Init : {args.init}")
|
||||
|
||||
custom_confs_rx = re_compile(
|
||||
r"^([0-9a-z\.\-]*)_?CUSTOM_CONF_(HTTP|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$"
|
||||
)
|
||||
|
||||
# Check existences and permissions
|
||||
logger.info("Checking arguments ...")
|
||||
files = [args.settings] + ([args.variables] if args.variables else [])
|
||||
paths_rx = [args.core, args.plugins, args.templates]
|
||||
paths_rwx = [args.output]
|
||||
for file in files:
|
||||
if not path.exists(file):
|
||||
logger.error(f"Missing file : {file}")
|
||||
sys_exit(1)
|
||||
if not access(file, R_OK):
|
||||
logger.error(f"Can't read file : {file}")
|
||||
sys_exit(1)
|
||||
for _path in paths_rx + paths_rwx:
|
||||
if not path.isdir(_path):
|
||||
logger.error(f"Missing directory : {_path}")
|
||||
sys_exit(1)
|
||||
if not access(_path, R_OK | X_OK):
|
||||
logger.error(
|
||||
f"Missing RX rights on directory : {_path}",
|
||||
)
|
||||
sys_exit(1)
|
||||
for _path in paths_rwx:
|
||||
if not access(_path, W_OK):
|
||||
logger.error(
|
||||
f"Missing W rights on directory : {_path}",
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
# Check core plugins orders
|
||||
logger.info("Checking core plugins orders ...")
|
||||
core_plugins = {}
|
||||
files = glob(f"{args.core}/*/plugin.json")
|
||||
for file in files:
|
||||
try:
|
||||
with open(file) as f:
|
||||
core_plugin = loads(f.read())
|
||||
|
||||
if core_plugin["order"] not in core_plugins:
|
||||
core_plugins[core_plugin["order"]] = []
|
||||
|
||||
core_plugins[core_plugin["order"]].append(core_plugin)
|
||||
except:
|
||||
logger.error(
|
||||
f"Exception while loading JSON from {file} : {format_exc()}",
|
||||
)
|
||||
|
||||
core_settings = {}
|
||||
for order in core_plugins:
|
||||
if len(core_plugins[order]) > 1 and order != 999:
|
||||
logger.warning(
|
||||
f"Multiple plugins have the same order ({order}) : {', '.join(plugin['id'] for plugin in core_plugins[order])}. Therefor, the execution order will be random.",
|
||||
)
|
||||
|
||||
for plugin in core_plugins[order]:
|
||||
core_settings.update(plugin["settings"])
|
||||
|
||||
integration = "Linux"
|
||||
if exists("/opt/bunkerweb/INTEGRATION"):
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes":
|
||||
integration = "Kubernetes"
|
||||
elif getenv("SWARM_MODE", "no") == "yes":
|
||||
integration = "Swarm"
|
||||
elif getenv("AUTOCONF_MODE", "no") == "yes":
|
||||
integration = "Autoconf"
|
||||
elif exists("/opt/bunkerweb/INTEGRATION"):
|
||||
with open("/opt/bunkerweb/INTEGRATION", "r") as f:
|
||||
integration = f.read().strip()
|
||||
|
||||
if args.variables or args.init:
|
||||
# Compute the config
|
||||
logger.info("Computing config ...")
|
||||
config = Configurator(
|
||||
args.settings, core_settings, args.plugins, args.variables, logger
|
||||
)
|
||||
config_files = config.get_config()
|
||||
|
||||
if config_files.get("LOG_LEVEL", logger.level) != logger.level:
|
||||
logger = setup_logger("Generator", config_files["LOG_LEVEL"])
|
||||
|
||||
bw_integration = "Local"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes":
|
||||
bw_integration = "Kubernetes"
|
||||
elif (
|
||||
integration == "Docker"
|
||||
or getenv("SWARM_MODE", getenv("AUTOCONF_MODE", "no")) == "yes"
|
||||
):
|
||||
bw_integration = "Cluster"
|
||||
|
||||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
bw_integration=bw_integration,
|
||||
)
|
||||
is_initialized = db.is_initialized()
|
||||
|
||||
if not is_initialized:
|
||||
ret, err = db.init_tables(
|
||||
[
|
||||
config.get_settings(),
|
||||
list(chain.from_iterable(core_plugins.values())),
|
||||
config.get_plugins_settings(),
|
||||
]
|
||||
)
|
||||
|
||||
# Initialize database tables
|
||||
if err:
|
||||
if args.variables:
|
||||
# Check existences and permissions
|
||||
logger.info("Checking arguments ...")
|
||||
files = [args.settings, args.variables]
|
||||
paths_rx = [args.core, args.plugins, args.templates]
|
||||
paths_rwx = [args.output]
|
||||
for file in files:
|
||||
if not path.exists(file):
|
||||
logger.error(f"Missing file : {file}")
|
||||
sys_exit(1)
|
||||
if not access(file, R_OK):
|
||||
logger.error(f"Can't read file : {file}")
|
||||
sys_exit(1)
|
||||
for _path in paths_rx + paths_rwx:
|
||||
if not path.isdir(_path):
|
||||
logger.error(f"Missing directory : {_path}")
|
||||
sys_exit(1)
|
||||
if not access(_path, R_OK | X_OK):
|
||||
logger.error(
|
||||
f"Exception while initializing database : {err}",
|
||||
f"Missing RX rights on directory : {_path}",
|
||||
)
|
||||
sys_exit(1)
|
||||
elif ret is False:
|
||||
logger.info(
|
||||
"Database tables are already initialized, skipping creation ...",
|
||||
)
|
||||
else:
|
||||
logger.info("Database tables initialized")
|
||||
|
||||
logger.info(
|
||||
"Database not initialized, initializing ...",
|
||||
)
|
||||
|
||||
custom_confs = [
|
||||
{"value": v, "exploded": custom_confs_rx.search(k).groups()}
|
||||
for k, v in environ.items()
|
||||
if custom_confs_rx.match(k)
|
||||
]
|
||||
|
||||
with open("/opt/bunkerweb/VERSION", "r") as f:
|
||||
bw_version = f.read().strip()
|
||||
|
||||
if bw_integration == "Local":
|
||||
err = db.save_config(config_files, args.method)
|
||||
|
||||
if not err:
|
||||
err1 = db.save_custom_configs(custom_confs, args.method)
|
||||
else:
|
||||
err = None
|
||||
err1 = None
|
||||
|
||||
integration = "Linux"
|
||||
if config_files.get("KUBERNETES_MODE", "no") == "yes":
|
||||
integration = "Kubernetes"
|
||||
elif config_files.get("SWARM_MODE", "no") == "yes":
|
||||
integration = "Swarm"
|
||||
elif config_files.get("AUTOCONF_MODE", "no") == "yes":
|
||||
integration = "Autoconf"
|
||||
elif exists("/opt/bunkerweb/INTEGRATION"):
|
||||
with open("/opt/bunkerweb/INTEGRATION", "r") as f:
|
||||
integration = f.read().strip()
|
||||
|
||||
err2 = db.initialize_db(version=bw_version, integration=integration)
|
||||
|
||||
if err or err1 or err2:
|
||||
for _path in paths_rwx:
|
||||
if not access(_path, W_OK):
|
||||
logger.error(
|
||||
f"Can't Initialize database : {err or err1 or err2}",
|
||||
f"Missing W rights on directory : {_path}",
|
||||
)
|
||||
sys_exit(1)
|
||||
else:
|
||||
logger.info("Database initialized")
|
||||
|
||||
if args.init:
|
||||
sys_exit(0)
|
||||
elif is_initialized:
|
||||
logger.info(
|
||||
"Database is already initialized, skipping ...",
|
||||
)
|
||||
|
||||
config = db.get_config()
|
||||
elif integration == "Docker":
|
||||
bw_integration = "Cluster"
|
||||
docker_client = DockerClient(
|
||||
base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
|
||||
)
|
||||
|
||||
def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):
|
||||
api_http_port = None
|
||||
api_server_name = None
|
||||
tmp_config = {}
|
||||
custom_confs = []
|
||||
apis = []
|
||||
|
||||
for var in (
|
||||
instance.attrs["Config"]["Env"]
|
||||
if _type == "Docker"
|
||||
else instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
|
||||
):
|
||||
splitted = var.split("=", 1)
|
||||
if custom_confs_rx.match(splitted[0]):
|
||||
custom_confs.append(
|
||||
{
|
||||
"value": splitted[1],
|
||||
"exploded": custom_confs_rx.search(
|
||||
splitted[0]
|
||||
).groups(),
|
||||
}
|
||||
)
|
||||
else:
|
||||
tmp_config[splitted[0]] = splitted[1]
|
||||
|
||||
if db is None and splitted[0] == "DATABASE_URI":
|
||||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=splitted[1],
|
||||
)
|
||||
elif splitted[0] == "API_HTTP_PORT":
|
||||
api_http_port = splitted[1]
|
||||
elif splitted[0] == "API_SERVER_NAME":
|
||||
api_server_name = splitted[1]
|
||||
|
||||
apis.append(
|
||||
API(
|
||||
f"http://{instance.name}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
|
||||
host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
|
||||
)
|
||||
)
|
||||
|
||||
return tmp_config, custom_confs, apis, db
|
||||
|
||||
tmp_config = {}
|
||||
custom_confs = []
|
||||
apis = []
|
||||
db = None
|
||||
|
||||
for instance in docker_client.containers.list(
|
||||
filters={"label": "bunkerweb.INSTANCE"}
|
||||
):
|
||||
conf, cstm_confs, tmp_apis, tmp_db = get_instance_configs_and_apis(
|
||||
instance, db
|
||||
)
|
||||
tmp_config.update(conf)
|
||||
custom_confs.extend(cstm_confs)
|
||||
apis.extend(tmp_apis)
|
||||
if db is None:
|
||||
db = tmp_db
|
||||
|
||||
is_swarm = True
|
||||
try:
|
||||
docker_client.swarm.version
|
||||
except:
|
||||
is_swarm = False
|
||||
|
||||
if is_swarm:
|
||||
for instance in docker_client.services.list(
|
||||
filters={"label": "bunkerweb.INSTANCE"}
|
||||
):
|
||||
conf, cstm_confs, tmp_apis, tmp_db = get_instance_configs_and_apis(
|
||||
instance, db, "Swarm"
|
||||
)
|
||||
tmp_config.update(conf)
|
||||
custom_confs.extend(cstm_confs)
|
||||
apis.extend(tmp_apis)
|
||||
if db is None:
|
||||
db = tmp_db
|
||||
|
||||
if db is None:
|
||||
db = Database(logger)
|
||||
|
||||
# Compute the config
|
||||
logger.info("Computing config ...")
|
||||
config = Configurator(
|
||||
args.settings, core_settings, args.plugins, tmp_config, logger
|
||||
args.settings, args.core, args.plugins, args.variables, logger
|
||||
)
|
||||
config_files = config.get_config()
|
||||
|
||||
if config_files.get("LOG_LEVEL", logger.level) != logger.level:
|
||||
logger = setup_logger("Generator", config_files["LOG_LEVEL"])
|
||||
|
||||
err = db.save_config(config_files, args.method)
|
||||
|
||||
if not err:
|
||||
err1 = db.save_custom_configs(custom_confs, args.method)
|
||||
else:
|
||||
err = None
|
||||
err1 = None
|
||||
|
||||
if err or err1:
|
||||
logger.error(
|
||||
f"Can't save config to database : {err or err1}",
|
||||
)
|
||||
sys_exit(1)
|
||||
else:
|
||||
logger.info("Config successfully saved to database")
|
||||
|
||||
config = db.get_config()
|
||||
elif integration == "Kubernetes":
|
||||
bw_integration = "Kubernetes"
|
||||
corev1 = kube_client.CoreV1Api()
|
||||
tmp_config = {}
|
||||
apis = []
|
||||
db = None
|
||||
|
||||
for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
|
||||
if (
|
||||
pod.metadata.annotations != None
|
||||
and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
|
||||
):
|
||||
api_http_port = None
|
||||
api_server_name = None
|
||||
|
||||
for pod_env in pod.spec.containers[0].env:
|
||||
tmp_config[pod_env.name] = pod_env.value
|
||||
|
||||
if db is None and pod_env.name == "DATABASE_URI":
|
||||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=pod_env.value,
|
||||
)
|
||||
elif pod_env.name == "API_HTTP_PORT":
|
||||
api_http_port = pod_env.value
|
||||
elif pod_env.name == "API_SERVER_NAME":
|
||||
api_server_name = pod_env.value
|
||||
|
||||
apis.append(
|
||||
API(
|
||||
f"http://{pod.status.pod_ip}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
|
||||
host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
|
||||
)
|
||||
)
|
||||
|
||||
if db is None:
|
||||
db = Database(logger)
|
||||
|
||||
# Compute the config
|
||||
logger.info("Computing config ...")
|
||||
config = Configurator(
|
||||
args.settings, core_settings, args.plugins, tmp_config, logger
|
||||
)
|
||||
config_files = config.get_config()
|
||||
|
||||
if config_files.get("LOG_LEVEL", logger.level) != logger.level:
|
||||
logger = setup_logger("Generator", config_files["LOG_LEVEL"])
|
||||
|
||||
err = db.save_config(config_files, args.method)
|
||||
|
||||
if not err:
|
||||
supported_config_types = [
|
||||
"http",
|
||||
"stream",
|
||||
"server-http",
|
||||
"server-stream",
|
||||
"default-server-http",
|
||||
"modsec",
|
||||
"modsec-crs",
|
||||
]
|
||||
custom_confs = []
|
||||
|
||||
for configmap in corev1.list_config_map_for_all_namespaces(
|
||||
watch=False
|
||||
).items:
|
||||
if (
|
||||
configmap.metadata.annotations is None
|
||||
or "bunkerweb.io/CONFIG_TYPE"
|
||||
not in configmap.metadata.annotations
|
||||
):
|
||||
continue
|
||||
|
||||
config_type = configmap.metadata.annotations[
|
||||
"bunkerweb.io/CONFIG_TYPE"
|
||||
]
|
||||
|
||||
if config_type not in supported_config_types:
|
||||
logger.warning(
|
||||
f"Ignoring unsupported CONFIG_TYPE {config_type} for ConfigMap {configmap.metadata.name}",
|
||||
)
|
||||
continue
|
||||
elif not configmap.data:
|
||||
logger.warning(
|
||||
f"Ignoring blank ConfigMap {configmap.metadata.name}",
|
||||
)
|
||||
continue
|
||||
|
||||
config_site = ""
|
||||
if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations:
|
||||
config_site = f"{configmap.metadata.annotations['bunkerweb.io/CONFIG_SITE']}/"
|
||||
|
||||
for config_name, config_data in configmap.data.items():
|
||||
custom_confs.append(
|
||||
{
|
||||
"value": config_data,
|
||||
"exploded": (config_site, config_type, config_name),
|
||||
}
|
||||
)
|
||||
|
||||
err1 = db.save_custom_configs(custom_confs, args.method)
|
||||
else:
|
||||
err = None
|
||||
err1 = None
|
||||
|
||||
if err or err1:
|
||||
logger.error(
|
||||
f"Can't save config to database : {err or err1}",
|
||||
)
|
||||
sys_exit(1)
|
||||
else:
|
||||
logger.info("Config successfully saved to database")
|
||||
|
||||
config = db.get_config()
|
||||
config = config.get_config()
|
||||
else:
|
||||
db = Database(
|
||||
logger,
|
||||
bw_integration="Kubernetes"
|
||||
if getenv("KUBERNETES_MODE", "no") == "yes"
|
||||
else "Cluster",
|
||||
sqlalchemy_string=getenv("DATABASE_URI", None),
|
||||
)
|
||||
|
||||
config = db.get_config()
|
||||
|
||||
bw_integration = "Local"
|
||||
if config.get("KUBERNETES_MODE", "no") == "yes":
|
||||
bw_integration = "Kubernetes"
|
||||
elif (
|
||||
config.get("SWARM_MODE", "no") == "yes"
|
||||
or config.get("AUTOCONF_MODE", "no") == "yes"
|
||||
):
|
||||
bw_integration = "Cluster"
|
||||
|
||||
logger = setup_logger("Generator", config.get("LOG_LEVEL", "INFO"))
|
||||
|
||||
if bw_integration == "Local":
|
||||
retries = 0
|
||||
while not exists("/opt/bunkerweb/tmp/nginx.pid"):
|
||||
if retries == 5:
|
||||
logger.error(
|
||||
"BunkerWeb's nginx didn't start in time.",
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
logger.warning(
|
||||
"Waiting for BunkerWeb's nginx to start, retrying in 5 seconds ...",
|
||||
)
|
||||
retries += 1
|
||||
sleep(5)
|
||||
|
||||
# Remove old files
|
||||
logger.info("Removing old files ...")
|
||||
files = glob(f"{args.output}/*")
|
||||
|
@ -555,7 +158,21 @@ if __name__ == "__main__":
|
|||
)
|
||||
templator.render()
|
||||
|
||||
if bw_integration == "Local":
|
||||
if integration == "Linux":
|
||||
retries = 0
|
||||
while not exists("/opt/bunkerweb/tmp/nginx.pid"):
|
||||
if retries == 5:
|
||||
logger.error(
|
||||
"BunkerWeb's nginx didn't start in time.",
|
||||
)
|
||||
sys_exit(1)
|
||||
|
||||
logger.warning(
|
||||
"Waiting for BunkerWeb's nginx to start, retrying in 5 seconds ...",
|
||||
)
|
||||
retries += 1
|
||||
sleep(5)
|
||||
|
||||
cmd = "/usr/sbin/nginx -s reload"
|
||||
proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
|
||||
if proc.returncode != 0:
|
||||
|
|
|
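The database-saving half of gen/main.py moves into the new gen/save_config.py shown next, leaving main.py with template rendering only. A rough sketch of the resulting two-step flow, using the command lines that appear in the scheduler and entrypoint changes further down:

from subprocess import DEVNULL, STDOUT, run

# Step 1: save/initialize the config in the database (new script).
save = run(
    ["python", "/opt/bunkerweb/gen/save_config.py", "--settings", "/opt/bunkerweb/settings.json"],
    stdin=DEVNULL,
    stderr=STDOUT,
)
# Step 2: render the nginx configuration (gen/main.py, now without --method/--init).
if save.returncode == 0:
    run(
        [
            "python", "/opt/bunkerweb/gen/main.py",
            "--settings", "/opt/bunkerweb/settings.json",
            "--templates", "/opt/bunkerweb/confs",
            "--output", "/etc/nginx",
        ],
        stdin=DEVNULL,
        stderr=STDOUT,
    )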
bw/gen/save_config.py (new file; the path is inferred from the entrypoint change below)
@@ -0,0 +1,398 @@
#!/usr/bin/python3

from argparse import ArgumentParser
from glob import glob
from itertools import chain
from json import loads
from os import R_OK, W_OK, X_OK, access, environ, getenv, path
from os.path import exists
from re import compile as re_compile
from sys import exit as sys_exit, path as sys_path
from traceback import format_exc
from typing import Any


sys_path.append("/opt/bunkerweb/deps/python")
sys_path.append("/opt/bunkerweb/utils")
sys_path.append("/opt/bunkerweb/api")
sys_path.append("/opt/bunkerweb/db")

from docker import DockerClient
from kubernetes import client as kube_client

from logger import setup_logger
from Database import Database
from Configurator import Configurator
from API import API

custom_confs_rx = re_compile(
    r"^([0-9a-z\.\-]*)_?CUSTOM_CONF_(HTTP|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$"
)

def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):
|
||||
api_http_port = None
|
||||
api_server_name = None
|
||||
tmp_config = {}
|
||||
custom_confs = []
|
||||
apis = []
|
||||
|
||||
for var in (
|
||||
instance.attrs["Config"]["Env"]
|
||||
if _type == "Docker"
|
||||
else instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
|
||||
):
|
||||
splitted = var.split("=", 1)
|
||||
if custom_confs_rx.match(splitted[0]):
|
||||
custom_confs.append(
|
||||
{
|
||||
"value": splitted[1],
|
||||
"exploded": custom_confs_rx.search(splitted[0]).groups(),
|
||||
}
|
||||
)
|
||||
else:
|
||||
tmp_config[splitted[0]] = splitted[1]
|
||||
|
||||
if db is None and splitted[0] == "DATABASE_URI":
|
||||
db = Database(
|
||||
logger,
|
||||
sqlalchemy_string=splitted[1],
|
||||
)
|
||||
elif splitted[0] == "API_HTTP_PORT":
|
||||
api_http_port = splitted[1]
|
||||
elif splitted[0] == "API_SERVER_NAME":
|
||||
api_server_name = splitted[1]
|
||||
|
||||
apis.append(
|
||||
API(
|
||||
f"http://{instance.name}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
|
||||
host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
|
||||
)
|
||||
)
|
||||
|
||||
return tmp_config, custom_confs, apis, db
|
||||
|
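
# Illustration (not part of the commit): for a container whose Env list is
# ["API_HTTP_PORT=5000", "app.example.com_CUSTOM_CONF_SERVER_HTTP_hello=..."],
# the helper above returns one custom conf whose "exploded" groups are
# ("app.example.com", "SERVER_HTTP", "hello") and one API object pointing at
# http://<container name>:5000.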


if __name__ == "__main__":
    logger = setup_logger("Generator", getenv("LOG_LEVEL", "INFO"))
    wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))

    try:
        # Parse arguments
        parser = ArgumentParser(description="BunkerWeb config saver")
        parser.add_argument(
            "--settings",
            default="/opt/bunkerweb/settings.json",
            type=str,
            help="file containing the main settings",
        )
        parser.add_argument(
            "--core",
            default="/opt/bunkerweb/core",
            type=str,
            help="directory containing the core plugins",
        )
        parser.add_argument(
            "--plugins",
            default="/opt/bunkerweb/plugins",
            type=str,
            help="directory containing the external plugins",
        )
        parser.add_argument(
            "--variables",
            type=str,
            help="path to the file containing environment variables",
        )
        parser.add_argument(
            "--init",
            action="store_true",
            help="Only initialize the database",
        )
        args = parser.parse_args()

        logger.info("First gen started ...")
        logger.info(f"Settings : {args.settings}")
        logger.info(f"Core : {args.core}")
        logger.info(f"Plugins : {args.plugins}")
        logger.info(f"Init : {args.init}")

        integration = "Linux"
        if getenv("KUBERNETES_MODE", "no") == "yes":
            integration = "Kubernetes"
        elif getenv("SWARM_MODE", "no") == "yes":
            integration = "Swarm"
        elif getenv("AUTOCONF_MODE", "no") == "yes":
            integration = "Autoconf"
        elif exists("/opt/bunkerweb/INTEGRATION"):
            with open("/opt/bunkerweb/INTEGRATION", "r") as f:
                integration = f.read().strip()

        logger.info(f"Detected {integration} integration")

        if args.variables:
            logger.info(f"Variables : {args.variables}")

            # Check existences and permissions
            logger.info("Checking arguments ...")
            files = [args.settings, args.variables]
            paths_rx = [args.core, args.plugins]
            for file in files:
                if not path.exists(file):
                    logger.error(f"Missing file : {file}")
                    sys_exit(1)
                if not access(file, R_OK):
                    logger.error(f"Can't read file : {file}")
                    sys_exit(1)
            for _path in paths_rx:
                if not path.isdir(_path):
                    logger.error(f"Missing directory : {_path}")
                    sys_exit(1)
                if not access(_path, R_OK | X_OK):
                    logger.error(
                        f"Missing RX rights on directory : {_path}",
                    )
                    sys_exit(1)

            # Check core plugins orders
            logger.info("Checking core plugins orders ...")
            core_plugins = {}
            files = glob(f"{args.core}/*/plugin.json")
            for file in files:
                try:
                    with open(file) as f:
                        core_plugin = loads(f.read())

                    if core_plugin["order"] not in core_plugins:
                        core_plugins[core_plugin["order"]] = []

                    core_plugins[core_plugin["order"]].append(core_plugin)
                except:
                    logger.error(
                        f"Exception while loading JSON from {file} : {format_exc()}",
                    )

            core_settings = {}
            for order in core_plugins:
                if len(core_plugins[order]) > 1 and order != 999:
                    logger.warning(
                        f"Multiple plugins have the same order ({order}) : {', '.join(plugin['id'] for plugin in core_plugins[order])}. Therefor, the execution order will be random.",
                    )

                for plugin in core_plugins[order]:
                    core_settings.update(plugin["settings"])

            # Compute the config
            logger.info("Computing config ...")
            config = Configurator(
                args.settings, core_settings, args.plugins, args.variables, logger
            )
            config_files = config.get_config()

            db = Database(
                logger,
                sqlalchemy_string=getenv("DATABASE_URI", None),
            )
            is_initialized = db.is_initialized()

            if not is_initialized:
                ret, err = db.init_tables(
                    [
                        config.get_settings(),
                        list(chain.from_iterable(core_plugins.values())),
                        config.get_plugins_settings(),
                    ]
                )

                # Initialize database tables
                if err:
                    logger.error(
                        f"Exception while initializing database : {err}",
                    )
                    sys_exit(1)
                elif ret is False:
                    logger.info(
                        "Database tables are already initialized, skipping creation ...",
                    )
                else:
                    logger.info("Database tables initialized")

                logger.info(
                    "Database not initialized, initializing ...",
                )

                custom_confs = [
                    {"value": v, "exploded": custom_confs_rx.search(k).groups()}
                    for k, v in environ.items()
                    if custom_confs_rx.match(k)
                ]

                with open("/opt/bunkerweb/VERSION", "r") as f:
                    bw_version = f.read().strip()

                err = db.save_config(config_files, "scheduler")

                if not err:
                    err1 = db.save_custom_configs(custom_confs, "scheduler")

                    if not err1:
                        err2 = db.initialize_db(
                            version=bw_version, integration=integration
                        )

                if err or err1 or err2:
                    logger.error(
                        f"Can't Initialize database : {err or err1 or err2}",
                    )
                    sys_exit(1)
                else:
                    logger.info("Database initialized")

                if args.init:
                    sys_exit(0)
            elif is_initialized:
                logger.info(
                    "Database is already initialized, skipping ...",
                )

            sys_exit(0)
        elif integration == "Kubernetes":
            corev1 = kube_client.CoreV1Api()
            tmp_config = {}
            apis = []
            db = None

            for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
                if (
                    pod.metadata.annotations != None
                    and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
                ):
                    api_http_port = None
                    api_server_name = None

                    for pod_env in pod.spec.containers[0].env:
                        tmp_config[pod_env.name] = pod_env.value

                        if db is None and pod_env.name == "DATABASE_URI":
                            db = Database(
                                logger,
                                sqlalchemy_string=pod_env.value,
                            )
                        elif pod_env.name == "API_HTTP_PORT":
                            api_http_port = pod_env.value
                        elif pod_env.name == "API_SERVER_NAME":
                            api_server_name = pod_env.value

                    apis.append(
                        API(
                            f"http://{pod.status.pod_ip}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
                            host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
                        )
                    )

            supported_config_types = [
                "http",
                "stream",
                "server-http",
                "server-stream",
                "default-server-http",
                "modsec",
                "modsec-crs",
            ]
            custom_confs = []

            for configmap in corev1.list_config_map_for_all_namespaces(
                watch=False
            ).items:
                if (
                    configmap.metadata.annotations is None
                    or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations
                ):
                    continue

                config_type = configmap.metadata.annotations["bunkerweb.io/CONFIG_TYPE"]

                if config_type not in supported_config_types:
                    logger.warning(
                        f"Ignoring unsupported CONFIG_TYPE {config_type} for ConfigMap {configmap.metadata.name}",
                    )
                    continue
                elif not configmap.data:
                    logger.warning(
                        f"Ignoring blank ConfigMap {configmap.metadata.name}",
                    )
                    continue

                config_site = ""
                if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations:
                    config_site = (
                        f"{configmap.metadata.annotations['bunkerweb.io/CONFIG_SITE']}/"
                    )

                for config_name, config_data in configmap.data.items():
                    custom_confs.append(
                        {
                            "value": config_data,
                            "exploded": (config_site, config_type, config_name),
                        }
                    )
        else:
            docker_client = DockerClient(
                base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
            )

            tmp_config = {}
            custom_confs = []
            apis = []
            db = None

            for instance in (
                docker_client.containers.list(filters={"label": "bunkerweb.INSTANCE"})
                if integration == "Docker"
                else docker_client.services.list(
                    filters={"label": "bunkerweb.INSTANCE"}
                )
            ):
                conf, cstm_confs, tmp_apis, tmp_db = get_instance_configs_and_apis(
                    instance, db, integration
                )
                tmp_config.update(conf)
                custom_confs.extend(cstm_confs)
                apis.extend(tmp_apis)
                if db is None:
                    db = tmp_db

            if db is None:
                db = Database(logger)

            # Compute the config
            logger.info("Computing config ...")
            config = Configurator(
                args.settings, args.core, args.plugins, tmp_config, logger
            )
            config_files = config.get_config()

            err = db.save_config(config_files, "scheduler")

            if not err:
                err1 = db.save_custom_configs(custom_confs, "scheduler")
            else:
                err = None
                err1 = None

            if err or err1:
                logger.error(
                    f"Can't save config to database : {err or err1}",
                )
                sys_exit(1)
            else:
                logger.info("Config successfully saved to database")
    except SystemExit as e:
        sys_exit(e)
    except:
        logger.error(
            f"Exception while executing config saver : {format_exc()}",
        )
        sys_exit(1)

    # We're done
    logger.info("Config saver successfully executed !")
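Both generators key custom configurations off the same CUSTOM_CONF naming convention. A quick, self-contained illustration of what the pattern captures:

from re import compile as re_compile

custom_confs_rx = re_compile(
    r"^([0-9a-z\.\-]*)_?CUSTOM_CONF_(HTTP|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC|MODSEC_CRS)_(.+)$"
)

# Site-scoped variable: groups are (server name, config type, file name).
assert custom_confs_rx.search(
    "app.example.com_CUSTOM_CONF_SERVER_HTTP_hello"
).groups() == ("app.example.com", "SERVER_HTTP", "hello")

# Global variable: the site group is empty.
assert custom_confs_rx.search("CUSTOM_CONF_HTTP_tuning").groups() == (
    "",
    "HTTP",
    "tuning",
)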
@@ -23,12 +23,7 @@ from jobs import file_hash


class Database:
    def __init__(
        self,
        logger: Logger,
        sqlalchemy_string: str = None,
        bw_integration: str = "Local",
    ) -> None:
    def __init__(self, logger: Logger, sqlalchemy_string: str = None) -> None:
        """Initialize the database"""
        self.__logger = logger
        self.__sql_session = None
@@ -38,60 +33,6 @@ class Database:
            logger.level if logger.level != INFO else WARNING
        )

        if sqlalchemy_string is None and bw_integration != "Local":
            if bw_integration == "Kubernetes":
                from kubernetes import client as kube_client

                corev1 = kube_client.CoreV1Api()
                for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
                    if (
                        pod.metadata.annotations != None
                        and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
                    ):
                        for pod_env in pod.spec.containers[0].env:
                            if pod_env.name == "DATABASE_URI":
                                sqlalchemy_string = pod_env.value
                                break

                    if sqlalchemy_string:
                        break
            else:
                from docker import DockerClient

                docker_client = DockerClient(
                    base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
                )
                for instance in docker_client.containers.list(
                    filters={"label": "bunkerweb.INSTANCE"}
                ):
                    for var in instance.attrs["Config"]["Env"]:
                        if var.startswith("DATABASE_URI="):
                            sqlalchemy_string = var.replace("DATABASE_URI=", "", 1)
                            break

                    if sqlalchemy_string:
                        break

                is_swarm = True
                try:
                    docker_client.swarm.version
                except:
                    is_swarm = False

                if not sqlalchemy_string and is_swarm:
                    for instance in docker_client.services.list(
                        filters={"label": "bunkerweb.INSTANCE"}
                    ):
                        for var in instance.attrs["Spec"]["TaskTemplate"][
                            "ContainerSpec"
                        ]["Env"]:
                            if var.startswith("DATABASE_URI="):
                                sqlalchemy_string = var.replace("DATABASE_URI=", "", 1)
                                break

                        if sqlalchemy_string:
                            break

        if not sqlalchemy_string:
            sqlalchemy_string = getenv("DATABASE_URI", "sqlite:////data/db.sqlite3")
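The deleted block is the old orchestrator probing: Database no longer queries Kubernetes pods or Docker/Swarm services for a DATABASE_URI. What survives is a plain fallback chain, equivalent to:

from os import getenv


def resolve_sqlalchemy_string(explicit: str = None) -> str:
    # Explicit argument wins, then the DATABASE_URI env var, then local SQLite.
    return explicit or getenv("DATABASE_URI", "sqlite:////data/db.sqlite3")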
@@ -376,8 +317,9 @@ class Database:
                    )

                    if service_setting is None:
                        if value == setting.default or (
                            key in config and value == config[key]
                        if key != "SERVER_NAME" and (
                            value == setting.default
                            or (key in config and value == config[key])
                        ):
                            continue
@@ -391,8 +333,9 @@ class Database:
                            )
                        )
                    elif method == "autoconf":
                        if value == setting.default or (
                            key in config and value == config[key]
                        if key != "SERVER_NAME" and (
                            value == setting.default
                            or (key in config and value == config[key])
                        ):
                            session.query(Services_settings).filter(
                                Services_settings.service_id == server_name,
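Both hunks rewrite the skip condition the same way: a value equal to the setting's default (or to the known global value) is still skipped, except for SERVER_NAME, which is now always persisted. The predicate, extracted for illustration:

def should_skip(key: str, value, default, config: dict) -> bool:
    # SERVER_NAME is always saved; anything else is skipped when it adds no
    # information over the default or the already-known global value.
    return key != "SERVER_NAME" and (
        value == default or (key in config and value == config[key])
    )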
@@ -29,13 +29,12 @@ CUSTOM_CONFIGS_TYPES = Enum(
)
LOG_LEVELS_ENUM = Enum("DEBUG", "INFO", "WARNING", "ERROR")
INTEGRATIONS_ENUM = Enum(
    "Docker",
    "Linux",
    "Docker",
    "Swarm",
    "Kubernetes",
    "Autoconf",
    "Ansible",
    "Vagrant",
    "Windows",
    "Unknown",
)
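INTEGRATIONS_ENUM now enumerates every supported install method. A minimal sketch of how such a value is typically attached to a column, assuming SQLAlchemy's Enum type and a hypothetical table for illustration:

from sqlalchemy import Column, Enum, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

INTEGRATIONS_ENUM = Enum(
    "Docker", "Linux", "Swarm", "Kubernetes", "Autoconf",
    "Ansible", "Vagrant", "Windows", "Unknown",
)


class Metadata(Base):  # hypothetical table, for illustration only
    __tablename__ = "metadata"
    id = Column(Integer, primary_key=True)
    integration = Column(INTEGRATIONS_ENUM, default="Unknown")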
@ -38,7 +38,7 @@ RUN apk add --no-cache bash libgcc libstdc++ openssl git && \
|
|||
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
|
||||
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
|
||||
chmod 770 /opt/bunkerweb/tmp && \
|
||||
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/scheduler/main.py /opt/bunkerweb/scheduler/entrypoint.sh /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/deps/python/bin/* && \
|
||||
chmod 750 /opt/bunkerweb/gen/*.py /opt/bunkerweb/scheduler/main.py /opt/bunkerweb/scheduler/entrypoint.sh /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/deps/python/bin/* && \
|
||||
find /opt/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
|
||||
mkdir /etc/nginx && \
|
||||
chown -R scheduler:scheduler /etc/nginx && \
|
||||
|
|
|
@@ -1,7 +1,7 @@
from glob import glob
from json import loads
from logging import Logger
from os import environ
from os import environ, getenv
from subprocess import DEVNULL, PIPE, STDOUT, run
from schedule import (
    clear as schedule_clear,
@@ -25,17 +25,13 @@ class JobScheduler(ApiCaller):
        env={},
        lock=None,
        apis=[],
        logger: Logger = setup_logger("Scheduler", environ.get("LOG_LEVEL", "INFO")),
        bw_integration: str = "Local",
        logger: Logger = setup_logger("Scheduler", getenv("LOG_LEVEL", "INFO")),
        integration: str = "Linux",
    ):
        super().__init__(apis)
        self.__logger = logger
        self.__bw_integration = bw_integration
        self.__db = Database(
            self.__logger,
            sqlalchemy_string=env.get("DATABASE_URI", None),
            bw_integration=self.__bw_integration,
        )
        self.__integration = integration
        self.__db = Database(self.__logger)
        self.__env = env
        self.__env.update(environ)
        self.__jobs = self.__get_jobs()
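The scheduler now identifies itself with a concrete integration name instead of the old Local/Cluster flag, and lets Database resolve its own connection string. A sketch of the new construction, mirroring the call site in scheduler/main.py further down (the module path is assumed):

from JobScheduler import JobScheduler  # module name assumed for illustration


def make_scheduler(env: dict, apis: list, logger, integration: str) -> JobScheduler:
    # integration is one of "Linux", "Docker", "Swarm", "Kubernetes", "Autoconf", ...
    return JobScheduler(
        env=env,
        apis=apis,
        logger=logger,
        integration=integration,
    )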
@@ -75,7 +71,7 @@ class JobScheduler(ApiCaller):

    def __reload(self):
        reload = True
        if self.__bw_integration == "Local":
        if self.__integration == "Linux":
            self.__logger.info("Reloading nginx ...")
            proc = run(
                ["/usr/sbin/nginx", "-s", "reload"],
@@ -39,7 +39,7 @@ fi

# Init database
get_env > "/tmp/variables.env"
/opt/bunkerweb/gen/main.py --variables /tmp/variables.env --method scheduler --init
/opt/bunkerweb/gen/save_config.py --variables /tmp/variables.env --init
if [ "$?" -ne 0 ] ; then
    log "ENTRYPOINT" "❌" "Scheduler generator failed"
    exit 1
@ -118,18 +118,11 @@ if __name__ == "__main__":
|
|||
)
|
||||
args = parser.parse_args()
|
||||
generate = args.generate == "yes"
|
||||
integration = "Linux"
|
||||
api_caller = ApiCaller()
|
||||
|
||||
logger.info("Scheduler started ...")
|
||||
|
||||
bw_integration = (
|
||||
"Local"
|
||||
if not isfile("/usr/sbin/nginx")
|
||||
and not isfile("/opt/bunkerweb/tmp/nginx.pid")
|
||||
else "Cluster"
|
||||
)
|
||||
|
||||
api_caller = ApiCaller()
|
||||
|
||||
if args.variables:
|
||||
logger.info(f"Variables : {args.variables}")
|
||||
|
||||
|
@@ -137,45 +130,25 @@ if __name__ == "__main__":
            env = dotenv_values(args.variables)
        else:
            # Read from database
            bw_integration = (
                "Kubernetes" if getenv("KUBERNETES_MODE", "no") == "yes" else "Cluster"
            )

            integration = "Docker"
            if exists("/opt/bunkerweb/INTEGRATION"):
                with open("/opt/bunkerweb/INTEGRATION", "r") as f:
                    integration = f.read().strip()

            api_caller.auto_setup(bw_integration=bw_integration)
            api_caller.auto_setup(bw_integration=integration)

            if integration == "Docker" and generate is True:
                # run the generator
                cmd = f"python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx{f' --variables {args.variables}' if args.variables else ''} --method scheduler"
                # run the config saver
                cmd = f"python /opt/bunkerweb/gen/save_config.py --settings /opt/bunkerweb/settings.json"
                proc = subprocess_run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
                if proc.returncode != 0:
                    logger.error(
                        "Config generator failed, configuration will not work as expected...",
                        "Config saver failed, configuration will not work as expected...",
                    )

                # Fix permissions for the nginx folder
                for root, dirs, files in walk("/etc/nginx", topdown=False):
                    for name in files + dirs:
                        chown(join(root, name), "scheduler", "scheduler")
                        chmod(join(root, name), 0o770)

                if len(api_caller._get_apis()) > 0:
                    # send nginx configs
                    logger.info("Sending /etc/nginx folder ...")
                    ret = api_caller._send_files("/etc/nginx", "/confs")
                    if not ret:
                        logger.error(
                            "Sending nginx configs failed, configuration will not work as expected...",
                        )

        db = Database(
            logger,
            sqlalchemy_string=getenv("DATABASE_URI", None),
            bw_integration=bw_integration,
        )

        while not db.is_initialized():
@@ -184,7 +157,7 @@ if __name__ == "__main__":
            )
            sleep(5)

        if bw_integration == "Kubernetes" or integration in (
        if integration in (
            "Swarm",
            "Kubernetes",
            "Autoconf",
@@ -265,7 +238,7 @@ if __name__ == "__main__":
                    if isfile(join(root, name)):
                        chmod(join(root, name), 0o740)

            if bw_integration != "Local":
            if integration != "Linux":
                logger.info("Sending custom configs to BunkerWeb")
                ret = api_caller._send_files("/data/configs", "/custom_configs")
@@ -281,7 +254,7 @@ if __name__ == "__main__":
            env=deepcopy(env),
            apis=api_caller._get_apis(),
            logger=logger,
            bw_integration=bw_integration,
            integration=integration,
        )

        # Only run jobs once
@@ -292,7 +265,7 @@ if __name__ == "__main__":

        if generate is True:
            # run the generator
            cmd = f"python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx{f' --variables {args.variables}' if args.variables else ''} --method scheduler"
            cmd = f"python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx{f' --variables {args.variables}' if args.variables else ''}"
            proc = subprocess_run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
            if proc.returncode != 0:
                logger.error(
@@ -334,7 +307,7 @@ if __name__ == "__main__":
            logger.info("Successfuly sent /data/cache folder")

        # reload nginx
        if bw_integration == "Local":
        if integration == "Linux":
            logger.info("Reloading nginx ...")
            proc = run(
                ["/usr/sbin/nginx", "-s", "reload"],
@@ -410,7 +383,7 @@ if __name__ == "__main__":
                    if isfile(join(root, name)):
                        chmod(join(root, name), 0o740)

            if bw_integration != "Local":
            if integration != "Linux":
                logger.info("Sending custom configs to BunkerWeb")
                ret = api_caller._send_files("/data/configs", "/custom_configs")
ui/main.py (16 changed lines)
@@ -118,29 +118,31 @@ PLUGIN_KEYS = [
    "settings",
]

bw_integration = "Local"
integration = "Linux"
if getenv("KUBERNETES_MODE", "no") == "yes":
    bw_integration = "Kubernetes"
elif getenv("SWARM_MODE", "no") == "yes" or getenv("AUTOCONF_MODE", "no") == "yes":
    bw_integration = "Cluster"
    integration = "Kubernetes"
elif getenv("SWARM_MODE", "no") == "yes":
    integration = "Swarm"
elif getenv("AUTOCONF_MODE", "no") == "yes":
    integration = "Autoconf"

try:
    docker_client: DockerClient = DockerClient(
        base_url=vars.get("DOCKER_HOST", "unix:///var/run/docker.sock")
    )
    bw_integration = "Cluster"
    integration = "Cluster"
except (docker_APIError, DockerException):
    logger.warning("No docker host found")
    docker_client = None

db = Database(logger, bw_integration=bw_integration)
db = Database(logger)

try:
    app.config.update(
        DEBUG=True,
        SECRET_KEY=vars["FLASK_SECRET"],
        ABSOLUTE_URI=vars["ABSOLUTE_URI"],
        INSTANCES=Instances(docker_client, bw_integration),
        INSTANCES=Instances(docker_client, integration),
        CONFIG=Config(logger, db),
        CONFIGFILES=ConfigFiles(logger, db),
        SESSION_COOKIE_DOMAIN=vars["ABSOLUTE_URI"]
@@ -59,9 +59,9 @@ class Instance:


class Instances:
    def __init__(self, docker_client, bw_integration: str):
    def __init__(self, docker_client, integration: str):
        self.__docker = docker_client
        self.__bw_integration = bw_integration
        self.__integration = integration

    def __instance_from_id(self, _id) -> Instance:
        instances: list[Instance] = self.get_instances()
@@ -104,35 +104,28 @@ class Instances:
                        apiCaller,
                    )
                )
        elif self.__integration == "Swarm":
            for instance in self.__docker.services.list(
                filters={"label": "bunkerweb.INSTANCE"}
            ):
                status = "down"
                desired_tasks = instance.attrs["ServiceStatus"]["DesiredTasks"]
                running_tasks = instance.attrs["ServiceStatus"]["RunningTasks"]
                if desired_tasks > 0 and (desired_tasks == running_tasks):
                    status = "up"

            is_swarm = True
            try:
                self.__docker.swarm.version
            except:
                is_swarm = False

            if is_swarm:
                for instance in self.__docker.services.list(
                    filters={"label": "bunkerweb.INSTANCE"}
                ):
                    status = "down"
                    desired_tasks = instance.attrs["ServiceStatus"]["DesiredTasks"]
                    running_tasks = instance.attrs["ServiceStatus"]["RunningTasks"]
                    if desired_tasks > 0 and (desired_tasks == running_tasks):
                        status = "up"

                instances.append(
                    Instance(
                        instance.id,
                        instance.name,
                        instance.name,
                        "service",
                        status,
                        instance,
                        apiCaller,
                    )
                instances.append(
                    Instance(
                        instance.id,
                        instance.name,
                        instance.name,
                        "service",
                        status,
                        instance,
                        apiCaller,
                    )
        elif self.__bw_integration == "Kubernetes":
                )
        elif self.__integration == "Kubernetes":
            corev1 = kube_client.CoreV1Api()
            for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
                if (
@@ -1,10 +1,20 @@
from io import BytesIO
from os import environ, getenv
from sys import path as sys_path
from tarfile import open as taropen

if "/opt/bunkerweb/utils" not in sys_path:
    sys_path.append("/opt/bunkerweb/utils")

from logger import setup_logger
from API import API

if "/opt/bunkerweb/deps/python" not in sys_path:
    sys_path.append("/opt/bunkerweb/deps/python")

from kubernetes import client as kube_client
from docker import DockerClient


class ApiCaller:
    def __init__(self, apis=[]):
@@ -12,12 +22,13 @@ class ApiCaller:
        self.__logger = setup_logger("Api", environ.get("LOG_LEVEL", "INFO"))

    def auto_setup(self, bw_integration: str = None):
        if bw_integration is None and getenv("KUBERNETES_MODE", "no") == "yes":
            bw_integration = "Kubernetes"
        if bw_integration is None:
            if getenv("KUBERNETES_MODE", "no") == "yes":
                bw_integration = "Kubernetes"
            elif getenv("SWARM_MODE", "no") == "yes":
                bw_integration = "Swarm"

        if bw_integration == "Kubernetes":
            from kubernetes import client as kube_client

            corev1 = kube_client.CoreV1Api()
            for pod in corev1.list_pod_for_all_namespaces(watch=False).items:
                if (
@@ -39,9 +50,28 @@ class ApiCaller:
                        host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
                    )
                )
        else:
            from docker import DockerClient
        elif bw_integration == "Swarm":
            for instance in docker_client.services.list(
                filters={"label": "bunkerweb.INSTANCE"}
            ):
                api_http_port = None
                api_server_name = None

                for var in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"][
                    "Env"
                ]:
                    if var.startswith("API_HTTP_PORT="):
                        api_http_port = var.replace("API_HTTP_PORT=", "", 1)
                    elif var.startswith("API_SERVER_NAME="):
                        api_server_name = var.replace("API_SERVER_NAME=", "", 1)

                self.__apis.append(
                    API(
                        f"http://{instance.name}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
                        host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
                    )
                )
        else:
            docker_client = DockerClient(
                base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
            )
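Every branch of auto_setup recovers the API port and server name from "KEY=VALUE" strings in a container's Env list. The extraction idiom, pulled out as a standalone helper for illustration:

def env_list_to_dict(env_list: list) -> dict:
    # Docker and Swarm expose environment as ["KEY=VALUE", ...]; split on the
    # first "=" only so values containing "=" survive intact.
    return dict(var.split("=", 1) for var in env_list)


env = env_list_to_dict(["API_HTTP_PORT=5000", "API_SERVER_NAME=bwapi"])
assert env["API_HTTP_PORT"] == "5000"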
@@ -64,34 +94,6 @@ class ApiCaller:
                    )
                )

            is_swarm = True
            try:
                docker_client.swarm.version
            except:
                is_swarm = False

            if is_swarm:
                for instance in docker_client.services.list(
                    filters={"label": "bunkerweb.INSTANCE"}
                ):
                    api_http_port = None
                    api_server_name = None

                    for var in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"][
                        "Env"
                    ]:
                        if var.startswith("API_HTTP_PORT="):
                            api_http_port = var.replace("API_HTTP_PORT=", "", 1)
                        elif var.startswith("API_SERVER_NAME="):
                            api_server_name = var.replace("API_SERVER_NAME=", "", 1)

                    self.__apis.append(
                        API(
                            f"http://{instance.name}:{api_http_port or getenv('API_HTTP_PORT', '5000')}",
                            host=api_server_name or getenv("API_SERVER_NAME", "bwapi"),
                        )
                    )

    def _set_apis(self, apis):
        self.__apis = apis