Refactor the py files
This commit is contained in:
parent cfe5c6063a
commit 9c22f1e971
@@ -8,7 +8,7 @@ from logger import setup_logger
 class Config(ConfigCaller):
     def __init__(self, ctrl_type, lock=None):
-        ConfigCaller.__init__(self)
+        super().__init__()
         self.__ctrl_type = ctrl_type
         self.__lock = lock
         self.__logger = setup_logger("Config", getenv("LOG_LEVEL", "INFO"))
@@ -20,29 +20,25 @@ class Config(ConfigCaller):
         self._db = Database(self.__logger)

     def __get_full_env(self) -> dict:
-        env_instances = {}
+        env_instances = {"SERVER_NAME": ""}
         for instance in self.__instances:
            for variable, value in instance["env"].items():
                 env_instances[variable] = value
         env_services = {}
-        if not "SERVER_NAME" in env_instances:
-            env_instances["SERVER_NAME"] = ""
         for service in self.__services:
+            server_name = service["SERVER_NAME"].split(" ")[0]
             for variable, value in service.items():
-                env_services[
-                    f"{service['SERVER_NAME'].split(' ')[0]}_{variable}"
-                ] = value
-            if env_instances["SERVER_NAME"] != "":
-                env_instances["SERVER_NAME"] += " "
-            env_instances["SERVER_NAME"] += service["SERVER_NAME"].split(" ")[0]
+                env_services[f"{server_name}_{variable}"] = value
+            env_instances["SERVER_NAME"] += f" {server_name}"
+        env_instances["SERVER_NAME"] = env_instances["SERVER_NAME"].strip()
         return self._full_env(env_instances, env_services)

     def update_needed(self, instances, services, configs=None) -> bool:
         if instances != self.__instances:
             return True
-        if services != self.__services:
+        elif services != self.__services:
             return True
-        if not configs is None and configs != self.__configs:
+        elif not configs is None and configs != self.__configs:
             return True
         return False
@@ -86,7 +82,7 @@ class Config(ConfigCaller):
             if err:
                 success = False
                 self.__logger.error(
-                    f"Can't save autoconf config in database: {err}",
+                    f"Can't save config in database: {err}, config may not work as expected",
                 )

         # save custom configs to database
@@ -94,7 +90,7 @@ class Config(ConfigCaller):
             if err:
                 success = False
                 self.__logger.error(
-                    f"Can't save autoconf custom configs in database: {err}",
+                    f"Can't save autoconf custom configs in database: {err}, custom configs may not work as expected",
                 )

         return success
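Reviewer note (illustrative sketch, not part of the diff): the Config hunk above swaps an explicit ConfigCaller.__init__(self) for super().__init__(), while the controller classes further down go the other way and call each base explicitly. With a single base the two spellings are equivalent; with two bases that do not cooperate via super(), super().__init__() only reaches the first class in the MRO. The class names here are made up:

    class Base:
        def __init__(self):
            self.ready = True

    class Child(Base):
        def __init__(self, name):
            # super() follows the MRO instead of naming the base class.
            super().__init__()
            self.name = name

    class Left:
        def __init__(self):
            self.left = True

    class Right:
        def __init__(self):
            self.right = True

    class Both(Left, Right):
        def __init__(self):
            # Left and Right never call super().__init__() themselves, so
            # super() here would only run Left.__init__; calling each base
            # explicitly initializes both, as the controllers below do.
            Left.__init__(self)
            Right.__init__(self)

    print(Child("x").ready, Both().left, Both().right)  # True True True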
@@ -21,16 +21,17 @@ class Controller(ABC):
             "modsec",
             "modsec-crs",
         ]
-        self._configs = {}
-        for config_type in self._supported_config_types:
-            self._configs[config_type] = {}
+        self._configs = {
+            config_type: {} for config_type in self._supported_config_types
+        }
         self._config = Config(ctrl_type, lock)
         self.__logger = setup_logger("Controller", getenv("LOG_LEVEL", "INFO"))

     def wait(self, wait_time):
-        while True:
+        all_ready = False
+        while not all_ready:
             self._instances = self.get_instances()
-            if len(self._instances) == 0:
+            if not self._instances:
                 self.__logger.warning(
                     f"No instance found, waiting {wait_time}s ...",
                 )
@@ -45,8 +46,6 @@ class Controller(ABC):
                     sleep(wait_time)
                     all_ready = False
                     break
-            if all_ready:
-                break
         return self._instances

     @abstractmethod
@@ -1,6 +1,6 @@
 from os import getenv
 from docker import DockerClient
-from re import search
+from re import compile as re_compile
 from traceback import format_exc

 from Controller import Controller
@@ -10,14 +10,20 @@ from logger import setup_logger
 class DockerController(Controller, ConfigCaller):
     def __init__(self, docker_host):
-        super().__init__("docker")
+        Controller.__init__(self, "docker")
+        ConfigCaller.__init__(self)
         self.__client = DockerClient(base_url=docker_host)
         self.__logger = setup_logger("docker-controller", getenv("LOG_LEVEL", "INFO"))
+        self.__custom_confs_rx = re_compile(
+            r"^bunkerweb.CUSTOM_CONF_(SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
+        )

     def _get_controller_instances(self):
         return self.__client.containers.list(filters={"label": "bunkerweb.INSTANCE"})

+    def _get_controller_services(self):
+        return self.__client.containers.list(filters={"label": "bunkerweb.SERVER_NAME"})
+
     def _to_instances(self, controller_instance):
         instance = {}
         instance["name"] = controller_instance.name
@@ -34,9 +40,6 @@ class DockerController(Controller, ConfigCaller):
                 instance["env"][variable] = value
         return [instance]

-    def _get_controller_services(self):
-        return self.__client.containers.list(filters={"label": "bunkerweb.SERVER_NAME"})
-
     def _to_services(self, controller_service):
         service = {}
         for variable, value in controller_service.labels.items():
@@ -54,62 +57,59 @@ class DockerController(Controller, ConfigCaller):
         for instance in self.__client.containers.list(
             filters={"label": "bunkerweb.INSTANCE"}
         ):
+            if not instance.attrs or not instance.attrs.get("Config", {}).get("Env"):
+                continue
+
             for env in instance.attrs["Config"]["Env"]:
                 variable = env.split("=")[0]
                 value = env.replace(f"{variable}=", "", 1)
                 variables[variable] = value
-        server_names = []
-        if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
-            server_names = variables["SERVER_NAME"].split(" ")
-        for server_name in server_names:
-            service = {}
-            service["SERVER_NAME"] = server_name
-            for variable, value in variables.items():
-                prefix = variable.split("_")[0]
-                real_variable = variable.replace(f"{prefix}_", "", 1)
-                if prefix == server_name and self._is_multisite_setting(real_variable):
-                    service[real_variable] = value
-            services.append(service)
+
+            if "SERVER_NAME" in variables and variables["SERVER_NAME"].strip():
+                for server_name in variables["SERVER_NAME"].strip().split(" "):
+                    service = {"SERVER_NAME": server_name}
+                    for variable, value in variables.items():
+                        prefix = variable.split("_")[0]
+                        real_variable = variable.replace(f"{prefix}_", "", 1)
+                        if prefix == server_name and self._is_multisite_setting(
+                            real_variable
+                        ):
+                            service[real_variable] = value
+                    services.append(service)
         return services

     def get_configs(self):
-        configs = {}
-        for config_type in self._supported_config_types:
-            configs[config_type] = {}
+        configs = {config_type: {} for config_type in self._supported_config_types}
         # get site configs from labels
         for container in self.__client.containers.list(
             filters={"label": "bunkerweb.SERVER_NAME"}
         ):
+            labels = container.labels  # type: ignore (labels is inside a container)
+            if isinstance(labels, list):
+                labels = {label: "" for label in labels}
+
             # extract server_name
-            server_name = ""
-            for variable, value in container.labels.items():
-                if not variable.startswith("bunkerweb."):
-                    continue
-                real_variable = variable.replace("bunkerweb.", "", 1)
-                if real_variable == "SERVER_NAME":
-                    server_name = value.split(" ")[0]
-                    break
+            server_name = labels.get("bunkerweb.SERVER_NAME", "").split(" ")[0]

             # extract configs
-            if server_name == "":
+            if not server_name:
                 continue
-            for variable, value in container.labels.items():
+
+            for variable, value in labels.items():
                 if not variable.startswith("bunkerweb."):
                     continue
-                real_variable = variable.replace("bunkerweb.", "", 1)
-                result = search(
-                    r"^CUSTOM_CONF_(SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$",
-                    real_variable,
-                )
+                result = self.__custom_confs_rx.search(variable)
                 if result is None:
                     continue
-                cfg_type = result.group(1).lower().replace("_", "-")
-                cfg_name = result.group(2)
-                configs[cfg_type][f"{server_name}/{cfg_name}"] = value
+                configs[result.group(1).lower().replace("_", "-")][
+                    f"{server_name}/{result.group(2)}"
+                ] = value
         return configs

     def apply_config(self):
-        ret = self._config.apply(self._instances, self._services, configs=self._configs)
-        return ret
+        return self._config.apply(
+            self._instances, self._services, configs=self._configs
+        )

     def process_events(self):
         for _ in self.__client.events(decode=True, filters={"type": "container"}):
@@ -122,10 +122,9 @@ class DockerController(Controller, ConfigCaller):
             ):
                 continue
             self.__logger.info(
-                "Catched Docker event, deploying new configuration ..."
+                "Caught Docker event, deploying new configuration ..."
             )
-            ret = self.apply_config()
-            if not ret:
+            if not self.apply_config():
                 self.__logger.error("Error while deploying new configuration")
             else:
                 self.__logger.info(
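Reviewer note (illustrative sketch, not part of the diff): get_configs above now reuses self.__custom_confs_rx, compiled once in __init__, instead of calling re.search with a pattern string inside the per-label loop. The label values below are invented:

    from re import compile as re_compile

    CUSTOM_CONF_RX = re_compile(
        r"^bunkerweb.CUSTOM_CONF_(SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
    )

    labels = {
        "bunkerweb.CUSTOM_CONF_MODSEC_rules": "SecRuleEngine On",
        "bunkerweb.SERVER_NAME": "www.example.com",
    }

    for label, value in labels.items():
        # The compiled object is reused on every iteration instead of the
        # pattern being re-parsed by re.search(pattern, ...) each time.
        result = CUSTOM_CONF_RX.search(label)
        if result is None:
            continue
        cfg_type = result.group(1).lower().replace("_", "-")
        print(cfg_type, result.group(2), value)  # modsec rules SecRuleEngine On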
@@ -25,7 +25,7 @@ class IngressController(Controller, ConfigCaller):
             pod
             for pod in self.__corev1.list_pod_for_all_namespaces(watch=False).items
             if (
-                pod.metadata.annotations != None
+                pod.metadata.annotations
                 and "bunkerweb.io/INSTANCE" in pod.metadata.annotations
             )
         ]
@@ -35,20 +35,18 @@ class IngressController(Controller, ConfigCaller):
         instance["name"] = controller_instance.metadata.name
         instance["hostname"] = controller_instance.status.pod_ip
         health = False
-        if controller_instance.status.conditions is not None:
+        if controller_instance.status.conditions:
             for condition in controller_instance.status.conditions:
                 if condition.type == "Ready" and condition.status == "True":
                     health = True
                     break
         instance["health"] = health
-        instance["env"] = {}
-        for env in controller_instance.spec.containers[0].env:
-            if env.value is not None:
-                instance["env"][env.name] = env.value
-            else:
-                instance["env"][env.name] = ""
+        instance["env"] = {
+            env.name: env.value or ""
+            for env in controller_instance.spec.containers[0].env
+        }
         for controller_service in self._get_controller_services():
-            if controller_service.metadata.annotations is not None:
+            if controller_service.metadata.annotations:
                 for (
                     annotation,
                     value,
@@ -64,65 +62,73 @@ class IngressController(Controller, ConfigCaller):
         return self.__networkingv1.list_ingress_for_all_namespaces(watch=False).items

     def _to_services(self, controller_service):
-        if controller_service.spec is None or controller_service.spec.rules is None:
+        if not controller_service.spec or not controller_service.spec.rules:
             return []

         services = []
         # parse rules
         for rule in controller_service.spec.rules:
-            if rule.host is None:
+            if not rule.host:
                 self.__logger.warning(
                     "Ignoring unsupported ingress rule without host.",
                 )
                 continue
             service = {}
             service["SERVER_NAME"] = rule.host
-            if rule.http is None:
+            if not rule.http:
                 services.append(service)
                 continue
             location = 1
             for path in rule.http.paths:
-                if path.path is None:
+                if not path.path:
                     self.__logger.warning(
                         "Ignoring unsupported ingress rule without path.",
                     )
                     continue
-                if path.backend.service is None:
+                elif not path.backend.service:
                     self.__logger.warning(
                         "Ignoring unsupported ingress rule without backend service.",
                     )
                     continue
-                if path.backend.service.port is None:
+                elif not path.backend.service.port:
                     self.__logger.warning(
                         "Ignoring unsupported ingress rule without backend service port.",
                     )
                     continue
-                if path.backend.service.port.number is None:
+                elif not path.backend.service.port.number:
                     self.__logger.warning(
                         "Ignoring unsupported ingress rule without backend service port number.",
                     )
                     continue

                 service_list = self.__corev1.list_service_for_all_namespaces(
                     watch=False,
                     field_selector=f"metadata.name={path.backend.service.name}",
                 ).items
-                if len(service_list) == 0:
+
+                if not service_list:
                     self.__logger.warning(
                         f"Ignoring ingress rule with service {path.backend.service.name} : service not found.",
                     )
                     continue

                 reverse_proxy_host = f"http://{path.backend.service.name}.{service_list[0].metadata.namespace}.svc.cluster.local:{path.backend.service.port.number}"
-                service["USE_REVERSE_PROXY"] = "yes"
-                service[f"REVERSE_PROXY_HOST_{location}"] = reverse_proxy_host
-                service[f"REVERSE_PROXY_URL_{location}"] = path.path
+                service.update(
+                    {
+                        "USE_REVERSE_PROXY": "yes",
+                        f"REVERSE_PROXY_HOST_{location}": reverse_proxy_host,
+                        f"REVERSE_PROXY_URL_{location}": path.path,
+                    }
+                )
                 location += 1
             services.append(service)

         # parse tls
-        if controller_service.spec.tls is not None:
+        if controller_service.spec.tls:  # TODO: support tls
             self.__logger.warning("Ignoring unsupported tls.")

         # parse annotations
-        if controller_service.metadata.annotations is not None:
+        if controller_service.metadata.annotations:
             for service in services:
                 for (
                     annotation,
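Reviewer note (illustrative sketch, not part of the diff): many checks above change from `x is None` to plain truthiness (`not x`). The two are not identical: a falsy-but-present value such as an empty list or empty string now takes the same branch as None, which is what these Kubernetes fields want. A minimal sketch of the difference:

    def pick(rules):
        if rules is None:
            return "missing"
        return "present"

    def pick_truthy(rules):
        if not rules:
            # None, [], "" and 0 all land here.
            return "missing"
        return "present"

    print(pick([]), pick_truthy([]))  # present missing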
@@ -130,14 +136,12 @@ class IngressController(Controller, ConfigCaller):
                     ) in controller_service.metadata.annotations.items():
                         if not annotation.startswith("bunkerweb.io/"):
                             continue

                         variable = annotation.replace("bunkerweb.io/", "", 1)
-                        if not variable.startswith(
-                            f"{service['SERVER_NAME'].split(' ')[0]}_"
-                        ):
+                        server_name = service["SERVER_NAME"].split(" ")[0]
+                        if not variable.startswith(f"{server_name}_"):
                             continue
-                        variable = variable.replace(
-                            f"{service['SERVER_NAME'].split(' ')[0]}_", "", 1
-                        )
+                        variable = variable.replace(f"{server_name}_", "", 1)
                         if self._is_multisite_setting(variable):
                             service[variable] = value
         return services
@@ -147,48 +151,46 @@ class IngressController(Controller, ConfigCaller):
         variables = {}
         for instance in self.__corev1.list_pod_for_all_namespaces(watch=False).items:
             if (
-                instance.metadata.annotations is None
+                not instance.metadata.annotations
                 or not "bunkerweb.io/INSTANCE" in instance.metadata.annotations
             ):
                 continue
-            for env in instance.spec.containers[0].env:
-                if env.value is None:
-                    variables[env.name] = ""
-                else:
-                    variables[env.name] = env.value
-        server_names = []
-        if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
-            server_names = variables["SERVER_NAME"].split(" ")
-        for server_name in server_names:
-            service = {}
-            service["SERVER_NAME"] = server_name
-            for variable, value in variables.items():
-                prefix = variable.split("_")[0]
-                real_variable = variable.replace(f"{prefix}_", "", 1)
-                if prefix == server_name and self._is_multisite_setting(real_variable):
-                    service[real_variable] = value
-            services.append(service)
+
+            variables = {
+                env.name: env.value or "" for env in instance.spec.containers[0].env
+            }
+
+            if "SERVER_NAME" in variables and variables["SERVER_NAME"].strip():
+                for server_name in variables["SERVER_NAME"].strip().split(" "):
+                    service = {"SERVER_NAME": server_name}
+                    for variable, value in variables.items():
+                        prefix = variable.split("_")[0]
+                        real_variable = variable.replace(f"{prefix}_", "", 1)
+                        if prefix == server_name and self._is_multisite_setting(
+                            real_variable
+                        ):
+                            service[real_variable] = value
+                    services.append(service)
         return services

     def get_configs(self):
-        configs = {}
-        for config_type in self._supported_config_types:
-            configs[config_type] = {}
+        configs = {config_type: {} for config_type in self._supported_config_types}
         for configmap in self.__corev1.list_config_map_for_all_namespaces(
             watch=False
         ).items:
             if (
-                configmap.metadata.annotations is None
+                not configmap.metadata.annotations
                 or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations
             ):
                 continue

             config_type = configmap.metadata.annotations["bunkerweb.io/CONFIG_TYPE"]
             if config_type not in self._supported_config_types:
                 self.__logger.warning(
                     f"Ignoring unsupported CONFIG_TYPE {config_type} for ConfigMap {configmap.metadata.name}",
                 )
                 continue
-            if not configmap.data:
+            elif not configmap.data:
                 self.__logger.warning(
                     f"Ignoring blank ConfigMap {configmap.metadata.name}",
                 )
@@ -212,7 +214,8 @@ class IngressController(Controller, ConfigCaller):
         elif watch_type == "configmap":
             what = self.__corev1.list_config_map_for_all_namespaces
         else:
-            raise Exception(f"unsupported watch_type {watch_type}")
+            raise Exception(f"Unsupported watch_type {watch_type}")
+
         while True:
             locked = False
             error = False
@@ -274,11 +277,12 @@ class IngressController(Controller, ConfigCaller):
             sleep(10)

     def apply_config(self):
-        ret = self._config.apply(self._instances, self._services, configs=self._configs)
-        return ret
+        return self._config.apply(
+            self._instances, self._services, configs=self._configs
+        )

     def process_events(self):
-        watch_types = ["pod", "ingress", "configmap"]
+        watch_types = ("pod", "ingress", "configmap")
         threads = [
             Thread(target=self.__watch, args=(watch_type,))
             for watch_type in watch_types
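Reviewer note (illustrative sketch, not part of the diff): process_events above fans out one watcher thread per resource type. A standalone sketch of that pattern, with the blocking watch body replaced by a placeholder:

    from threading import Thread
    from time import sleep

    def watch(watch_type):
        # Placeholder for the blocking Kubernetes watch loop.
        sleep(0.1)
        print(f"watching {watch_type}")

    watch_types = ("pod", "ingress", "configmap")
    threads = [Thread(target=watch, args=(watch_type,)) for watch_type in watch_types]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()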
@@ -11,7 +11,7 @@ from logger import setup_logger
 class SwarmController(Controller, ConfigCaller):
     def __init__(self, docker_host):
-        super().__init__("swarm")
+        Controller.__init__(self, "swarm")
+        ConfigCaller.__init__(self)
         self.__client = DockerClient(base_url=docker_host)
         self.__internal_lock = Lock()
@@ -20,6 +20,9 @@ class SwarmController(Controller, ConfigCaller):
     def _get_controller_instances(self):
         return self.__client.services.list(filters={"label": "bunkerweb.INSTANCE"})

+    def _get_controller_services(self):
+        return self.__client.services.list(filters={"label": "bunkerweb.SERVER_NAME"})
+
     def _to_instances(self, controller_instance):
         instances = []
         instance_env = {}
@@ -30,19 +33,17 @@ class SwarmController(Controller, ConfigCaller):
                 value = env.replace(f"{variable}=", "", 1)
                 if self._is_setting(variable):
                     instance_env[variable] = value
-        for task in controller_instance.tasks():
-            instance = {}
-            instance["name"] = task["ID"]
-            instance[
-                "hostname"
-            ] = f"{controller_instance.name}.{task['NodeID']}.{task['ID']}"
-            instance["health"] = task["Status"]["State"] == "running"
-            instance["env"] = instance_env
-            instances.append(instance)
-        return instances
-
-    def _get_controller_services(self):
-        return self.__client.services.list(filters={"label": "bunkerweb.SERVER_NAME"})
+        for task in controller_instance.tasks():
+            instances.append(
+                {
+                    "name": task["ID"],
+                    "hostname": f"{controller_instance.name}.{task['NodeID']}.{task['ID']}",
+                    "health": task["Status"]["State"] == "running",
+                    "env": instance_env,
+                }
+            )
+        return instances

     def _to_services(self, controller_service):
         service = {}
@@ -61,22 +62,27 @@ class SwarmController(Controller, ConfigCaller):
         for instance in self.__client.services.list(
             filters={"label": "bunkerweb.INSTANCE"}
         ):
+            if not instance.attrs or not instance.attrs.get("Spec", {}).get(
+                "TaskTemplate", {}
+            ).get("ContainerSpec", {}).get("Env"):
+                continue
+
             for env in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]:
                 variable = env.split("=")[0]
                 value = env.replace(f"{variable}=", "", 1)
                 variables[variable] = value
-        server_names = []
-        if "SERVER_NAME" in variables and variables["SERVER_NAME"] != "":
-            server_names = variables["SERVER_NAME"].split(" ")
-        for server_name in server_names:
-            service = {}
-            service["SERVER_NAME"] = server_name
-            for variable, value in variables.items():
-                prefix = variable.split("_")[0]
-                real_variable = variable.replace(f"{prefix}_", "", 1)
-                if prefix == server_name and self._is_multisite_setting(real_variable):
-                    service[real_variable] = value
-            services.append(service)
+
+            if "SERVER_NAME" in variables and variables["SERVER_NAME"].strip():
+                for server_name in variables["SERVER_NAME"].strip().split(" "):
+                    service = {}
+                    service["SERVER_NAME"] = server_name
+                    for variable, value in variables.items():
+                        prefix = variable.split("_")[0]
+                        real_variable = variable.replace(f"{prefix}_", "", 1)
+                        if prefix == server_name and self._is_multisite_setting(
+                            real_variable
+                        ):
+                            service[real_variable] = value
+                    services.append(service)
         return services

     def get_configs(self):
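Reviewer note (illustrative sketch, not part of the diff): the env parsing kept above (env.split("=")[0] plus env.replace(f"{variable}=", "", 1)) splits a KEY=VALUE string while preserving any "=" characters inside the value. str.partition, or split("=", 1), expresses the same thing in one call; the sample value is invented:

    env = "REVERSE_PROXY_URL=http://svc:8080/path?a=b"

    # Pattern used in the diff:
    variable = env.split("=")[0]
    value = env.replace(f"{variable}=", "", 1)

    # Equivalent single-call form:
    variable2, _, value2 = env.partition("=")

    assert (variable, value) == (variable2, value2)
    print(variable, value)  # REVERSE_PROXY_URL http://svc:8080/path?a=b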
@@ -86,6 +92,13 @@ class SwarmController(Controller, ConfigCaller):
         for config in self.__client.configs.list(
             filters={"label": "bunkerweb.CONFIG_TYPE"}
         ):
+            if (
+                not config.name
+                or not config.attrs
+                or not config.attrs.get("Spec", {}).get("Labels", {})
+            ):
+                continue
+
             config_type = config.attrs["Spec"]["Labels"]["bunkerweb.CONFIG_TYPE"]
             config_name = config.name
             if config_type not in self._supported_config_types:
@@ -104,11 +117,12 @@ class SwarmController(Controller, ConfigCaller):
         return configs

     def apply_config(self):
-        ret = self._config.apply(self._instances, self._services, configs=self._configs)
-        return ret
+        return self._config.apply(
+            self._instances, self._services, configs=self._configs
+        )

     def __event(self, event_type):
-        for event in self.__client.events(decode=True, filters={"type": event_type}):
+        for _ in self.__client.events(decode=True, filters={"type": event_type}):
             self.__internal_lock.acquire()
             try:
                 self._instances = self.get_instances()
@@ -121,8 +135,7 @@ class SwarmController(Controller, ConfigCaller):
                 self.__logger.info(
                     "Catched Swarm event, deploying new configuration ..."
                 )
-                ret = self.apply_config()
-                if not ret:
+                if not self.apply_config():
                     self.__logger.error("Error while deploying new configuration")
                 else:
                     self.__logger.info(
@@ -142,7 +155,7 @@ class SwarmController(Controller, ConfigCaller):
             self.__internal_lock.release()

     def process_events(self):
-        event_types = ["service", "config"]
+        event_types = ("service", "config")
         threads = [
             Thread(target=self.__event, args=(event_type,))
             for event_type in event_types
@@ -24,7 +24,13 @@ logger = setup_logger("Autoconf", getenv("LOG_LEVEL", "INFO"))
 swarm = getenv("SWARM_MODE", "no") == "yes"
 kubernetes = getenv("KUBERNETES_MODE", "no") == "yes"
 docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
-wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
+wait_retry_interval = getenv("WAIT_RETRY_INTERVAL", "5")
+
+if not wait_retry_interval.isdigit():
+    logger.error("Invalid WAIT_RETRY_INTERVAL value, must be an integer")
+    _exit(1)
+
+wait_retry_interval = int(wait_retry_interval)


 def exit_handler(signum, frame):
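Reviewer note (illustrative sketch, not part of the diff): the new WAIT_RETRY_INTERVAL handling above validates before converting instead of letting int() raise. One caveat worth knowing: str.isdigit() rejects signs and whitespace, so "-5" or " 5" count as invalid too. A self-contained sketch:

    from os import getenv

    def read_interval(default="5"):
        raw = getenv("WAIT_RETRY_INTERVAL", default)
        if not raw.isdigit():
            # "-5", "5.0" and " 5" all fail isdigit(), not only garbage.
            raise SystemExit("Invalid WAIT_RETRY_INTERVAL value, must be an integer")
        return int(raw)

    print(read_interval())  # 5 when the variable is unset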
@@ -17,7 +17,7 @@ class API:
         headers = {}
         headers["User-Agent"] = "bwapi"
         headers["Host"] = self.__host
-        if type(data) is dict:
+        if isinstance(data, dict):
             resp = request(
                 method,
                 f"{self.__endpoint}{url}",
@@ -25,7 +25,7 @@ class API:
                 timeout=timeout,
                 headers=headers,
             )
-        elif type(data) is bytes:
+        elif isinstance(data, bytes):
             resp = request(
                 method,
                 f"{self.__endpoint}{url}",
@@ -33,7 +33,7 @@ class API:
                 timeout=timeout,
                 headers=headers,
             )
-        elif files is not None:
+        elif files:
             resp = request(
                 method,
                 f"{self.__endpoint}{url}",
@@ -41,7 +41,7 @@ class API:
                 timeout=timeout,
                 headers=headers,
             )
-        elif data is None:
+        elif not data:
             resp = request(
                 method, f"{self.__endpoint}{url}", timeout=timeout, headers=headers
             )
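Reviewer note (illustrative sketch, not part of the diff): the `type(data) is dict` to `isinstance(data, dict)` changes above also accept subclasses, which exact type comparison does not:

    from collections import OrderedDict

    data = OrderedDict(a=1)

    print(type(data) is dict)      # False: exact type comparison
    print(isinstance(data, dict))  # True: subclasses count as dicts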
@@ -1,4 +1,4 @@
-from os.path import isfile
+from pathlib import Path
 from dotenv import dotenv_values
 from docker import DockerClient
 from kubernetes import client, config
@@ -34,16 +34,15 @@ class CLI(ApiCaller):
         super().__init__(self.__get_apis())

     def __detect_integration(self):
-        ret = "unknown"
         distrib = ""
-        if isfile("/etc/os-release"):
+        if Path("/etc/os-release").is_file():
             with open("/etc/os-release", "r") as f:
                 if "Alpine" in f.read():
                     distrib = "alpine"
                 else:
                     distrib = "other"
         # Docker case
-        if distrib == "alpine" and isfile("/usr/sbin/nginx"):
+        if distrib == "alpine" and Path("/usr/sbin/nginx").is_file():
             return "docker"
         # Linux case
         if distrib == "other":
@@ -58,7 +57,7 @@ class CLI(ApiCaller):
         if distrib == "alpine":
             return "autoconf"

-        raise Exception("can't detect integration")
+        raise Exception("Can't detect integration")

     def __get_apis(self):
         # Docker case
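Reviewer note (illustrative sketch, not part of the diff): the CLI section above swaps os.path.isfile for pathlib.Path.is_file(), and the same Path object replaces the open/read pairs elsewhere in this commit. A sketch of the equivalences, using a path that exists on most Linux systems:

    from os.path import isfile
    from pathlib import Path

    p = "/etc/os-release"

    # Existence check: equivalent forms.
    assert isfile(p) == Path(p).is_file()

    # Reading a small file: with-open vs. one call.
    if Path(p).is_file():
        with open(p, "r") as f:
            old_style = f.read()
        assert Path(p).read_text() == old_style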
@@ -5,13 +5,15 @@ from json import loads
 from logging import Logger
 from os import listdir
 from os.path import basename, dirname
+from pathlib import Path
 from re import search as re_search
 from sys import path as sys_path
 from tarfile import open as tar_open
 from traceback import format_exc
 from typing import Optional, Union

-sys_path.append("/usr/share/bunkerweb/utils")
+if "/usr/share/bunkerweb/utils" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/utils")


 class Configurator:
@@ -80,41 +82,39 @@ class Configurator:
             servers[server_name] = names
         return servers

-    def __load_settings(self, path):
-        with open(path) as f:
-            return loads(f.read())
+    def __load_settings(self, path) -> dict:
+        return loads(Path(path).read_text())

-    def __load_plugins(self, path, type: str = "other"):
+    def __load_plugins(self, path, _type: str = "other"):
         plugins = {}
         files = glob(f"{path}/*/plugin.json")
         for file in files:
             try:
-                with open(file) as f:
-                    data = loads(f.read())
+                data = self.__load_settings(file)

-                if type == "plugins":
+                if _type == "plugins":
                     plugin_content = BytesIO()
                     with tar_open(fileobj=plugin_content, mode="w:gz") as tar:
                         tar.add(
                             dirname(file),
                             arcname=basename(dirname(file)),
                             recursive=True,
                         )
                     plugin_content.seek(0)
                     value = plugin_content.getvalue()

                     self.__plugins_settings.append(
                         data
                         | {
                             "external": path.startswith("/etc/bunkerweb/plugins"),
                             "page": "ui" in listdir(dirname(file)),
                             "method": "manual",
                             "data": value,
                             "checksum": sha256(value).hexdigest(),
                         }
                     )

                 plugins.update(data["settings"])
             except:
                 self.__logger.error(
                     f"Exception while loading JSON from {file} : {format_exc()}",
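Reviewer note (illustrative sketch, not part of the diff): __load_plugins above packages each plugin directory into an in-memory gzipped tar and stores its bytes plus a checksum. A standalone sketch of that technique; the directory path in the usage line is invented:

    from hashlib import sha256
    from io import BytesIO
    from os.path import basename
    from tarfile import open as tar_open

    def pack_dir(plugin_dir):
        buf = BytesIO()
        # "w:gz" writes a gzip-compressed tar straight into the buffer,
        # so no temporary file is needed.
        with tar_open(fileobj=buf, mode="w:gz") as tar:
            tar.add(plugin_dir, arcname=basename(plugin_dir), recursive=True)
        buf.seek(0)
        value = buf.getvalue()
        return value, sha256(value).hexdigest()

    # Usage: data, checksum = pack_dir("/etc/bunkerweb/plugins/myplugin")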
@@ -128,7 +128,7 @@ class Configurator:
             lines = f.readlines()
             for line in lines:
                 line = line.strip()
-                if line.startswith("#") or line == "" or not "=" in line:
+                if not line or line.startswith("#") or not "=" in line:
                     continue
                 var = line.split("=")[0]
                 value = line[len(var) + 1 :]
@@ -182,9 +182,9 @@ class Configurator:
         where, real_var = self.__find_var(variable)
         if not where:
             return False, f"variable name {variable} doesn't exist"
-        if not "regex" in where[real_var]:
+        elif not "regex" in where[real_var]:
             return False, f"missing regex for variable {variable}"
-        if not re_search(where[real_var]["regex"], value):
+        elif not re_search(where[real_var]["regex"], value):
             return (
                 False,
                 f"value {value} doesn't match regex {where[real_var]['regex']}",
@@ -1,7 +1,7 @@
 from copy import deepcopy
 from glob import glob
 from importlib import import_module
-from os.path import basename, dirname, isdir
+from os.path import basename, dirname
+from pathlib import Path
 from random import choice
 from string import ascii_letters, digits
@@ -34,7 +34,7 @@ class Templator:
     def __load_jinja_env(self):
         searchpath = [self.__templates]
         for subpath in glob(f"{self.__core}/*") + glob(f"{self.__plugins}/*"):
-            if isdir(subpath):
+            if Path(subpath).is_dir():
                 searchpath.append(f"{subpath}/confs")
         return Environment(
             loader=FileSystemLoader(searchpath=searchpath),
@@ -135,9 +135,11 @@ class Templator:
         Path(dirname(f"{real_output}{real_name}")).mkdir(parents=True, exist_ok=True)
         Path(f"{real_output}{real_name}").write_text(jinja_template.render(real_config))

+    @staticmethod
     def is_custom_conf(path):
         return glob(f"{path}/*.conf")

+    @staticmethod
     def has_variable(all_vars, variable, value):
         if variable in all_vars and all_vars[variable] == value:
             return True
@@ -150,10 +152,12 @@ class Templator:
                 return True
         return False

+    @staticmethod
     def random(nb):
         characters = ascii_letters + digits
         return "".join(choice(characters) for _ in range(nb))

+    @staticmethod
     def read_lines(file):
         try:
             with open(file, "r") as f:
@@ -2,22 +2,20 @@

 from argparse import ArgumentParser
 from glob import glob
-from os import R_OK, W_OK, X_OK, access, getenv, path, remove, unlink
-from os.path import exists, isdir, isfile, islink
+from os import R_OK, W_OK, X_OK, access, getenv
 from pathlib import Path
 from shutil import rmtree
 from subprocess import DEVNULL, STDOUT, run
 from sys import exit as sys_exit, path as sys_path
 from time import sleep
 from traceback import format_exc


-sys_path.extend(
-    (
-        "/usr/share/bunkerweb/deps/python",
-        "/usr/share/bunkerweb/utils",
-        "/usr/share/bunkerweb/api",
-    )
-)
+if "/usr/share/bunkerweb/deps/python" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/deps/python")
+if "/usr/share/bunkerweb/utils" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/utils")
+if "/usr/share/bunkerweb/api" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/api")

 from logger import setup_logger
 from Configurator import Configurator
@@ -89,9 +87,8 @@ if __name__ == "__main__":
         integration = "Swarm"
     elif getenv("AUTOCONF_MODE", "no") == "yes":
         integration = "Autoconf"
-    elif exists("/usr/share/bunkerweb/INTEGRATION"):
-        with open("/usr/share/bunkerweb/INTEGRATION", "r") as f:
-            integration = f.read().strip()
+    elif Path("/usr/share/bunkerweb/INTEGRATION").exists():
+        integration = Path("/usr/share/bunkerweb/INTEGRATION").read_text().strip()

     if args.variables:
         logger.info(f"Variables : {args.variables}")
@@ -102,25 +99,25 @@ if __name__ == "__main__":
         paths_rx = [args.core, args.plugins, args.templates]
         paths_rwx = [args.output]
         for file in files:
-            if not path.exists(file):
+            if not Path(file).is_file():
                 logger.error(f"Missing file : {file}")
                 sys_exit(1)
-            if not access(file, R_OK):
+            elif not access(file, R_OK):
                 logger.error(f"Can't read file : {file}")
                 sys_exit(1)
-        for _path in paths_rx + paths_rwx:
-            if not path.isdir(_path):
-                logger.error(f"Missing directory : {_path}")
+        for path in paths_rx + paths_rwx:
+            if not Path(path).is_dir():
+                logger.error(f"Missing directory : {path}")
                 sys_exit(1)
-            if not access(_path, R_OK | X_OK):
+            elif not access(path, R_OK | X_OK):
                 logger.error(
-                    f"Missing RX rights on directory : {_path}",
+                    f"Missing RX rights on directory : {path}",
                 )
                 sys_exit(1)
-        for _path in paths_rwx:
-            if not access(_path, W_OK):
+        for path in paths_rwx:
+            if not access(path, W_OK):
                 logger.error(
-                    f"Missing W rights on directory : {_path}",
+                    f"Missing W rights on directory : {path}",
                 )
                 sys_exit(1)
@@ -131,7 +128,9 @@ if __name__ == "__main__":
         )
         config = config.get_config()
     else:
-        sys_path.append("/usr/share/bunkerweb/db")
+        if "/usr/share/bunkerweb/db" not in sys_path:
+            sys_path.append("/usr/share/bunkerweb/db")
+
         from Database import Database

         db = Database(
@@ -144,11 +143,9 @@ if __name__ == "__main__":
     logger.info("Removing old files ...")
     files = glob(f"{args.output}/*")
     for file in files:
-        if islink(file):
-            unlink(file)
-        elif isfile(file):
-            remove(file)
-        elif isdir(file):
+        if Path(file).is_symlink() or Path(file).is_file():
+            Path(file).unlink()
+        elif Path(file).is_dir():
             rmtree(file, ignore_errors=False)

     # Render the templates
@@ -165,7 +162,7 @@ if __name__ == "__main__":

     if integration == "Linux":
         retries = 0
-        while not exists("/var/tmp/bunkerweb/nginx.pid"):
+        while not Path("/var/tmp/bunkerweb/nginx.pid").exists():
             if retries == 5:
                 logger.error(
                     "BunkerWeb's nginx didn't start in time.",
@@ -186,7 +183,7 @@ if __name__ == "__main__":
             logger.info("Successfully reloaded nginx")

     except SystemExit as e:
-        sys_exit(e)
+        raise e
     except:
         logger.error(
             f"Exception while executing generator : {format_exc()}",
@@ -4,8 +4,9 @@ from argparse import ArgumentParser
 from glob import glob
 from itertools import chain
 from json import loads
-from os import R_OK, X_OK, access, environ, getenv, listdir, path, walk
-from os.path import exists, join
+from os import R_OK, X_OK, access, environ, getenv, listdir, walk
+from os.path import join
+from pathlib import Path
 from re import compile as re_compile
 from sys import exit as sys_exit, path as sys_path
 from time import sleep
@@ -30,7 +31,7 @@ from Configurator import Configurator
 from API import API

 custom_confs_rx = re_compile(
-    r"^([0-9a-z\.-]*)_?CUSTOM_CONF_(HTTP|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
+    r"^([0-9a-z\.-]*)_?CUSTOM_CONF_(HTTP|SERVER_STREAM|STREAM|DEFAULT_SERVER_HTTP|SERVER_HTTP|MODSEC_CRS|MODSEC)_(.+)$"
 )
@@ -57,7 +58,7 @@ def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):
         else:
             tmp_config[splitted[0]] = splitted[1]

-            if db is None and splitted[0] == "DATABASE_URI":
+            if not db and splitted[0] == "DATABASE_URI":
                 db = Database(
                     logger,
                     sqlalchemy_string=splitted[1],
@@ -79,7 +80,13 @@ def get_instance_configs_and_apis(instance: Any, db, _type="Docker"):

 if __name__ == "__main__":
     logger = setup_logger("Generator", getenv("LOG_LEVEL", "INFO"))
-    wait_retry_interval = int(getenv("WAIT_RETRY_INTERVAL", "5"))
+    wait_retry_interval = getenv("WAIT_RETRY_INTERVAL", "5")
+
+    if not wait_retry_interval.isdigit():
+        logger.error("Invalid WAIT_RETRY_INTERVAL value, must be an integer")
+        sys_exit(1)
+
+    wait_retry_interval = int(wait_retry_interval)

     try:
         # Parse arguments
@@ -133,9 +140,8 @@ if __name__ == "__main__":
         integration = "Swarm"
     elif getenv("AUTOCONF_MODE", "no") == "yes":
         integration = "Autoconf"
-    elif exists("/usr/share/bunkerweb/INTEGRATION"):
-        with open("/usr/share/bunkerweb/INTEGRATION", "r") as f:
-            integration = f.read().strip()
+    elif Path("/usr/share/bunkerweb/INTEGRATION").is_file():
+        integration = Path("/usr/share/bunkerweb/INTEGRATION").read_text().strip()

     if args.init:
         logger.info(f"Detected {integration} integration")
@@ -146,7 +152,7 @@ if __name__ == "__main__":

     plugins = args.plugins
     plugins_settings = None
-    if not exists("/usr/sbin/nginx") and args.method == "ui":
+    if not Path("/usr/sbin/nginx").exists() and args.method == "ui":
         db = Database(logger)
         plugins = {}
         plugins_settings = []
@@ -159,19 +165,19 @@ if __name__ == "__main__":
     files = [args.settings] + ([args.variables] if args.variables else [])
     paths_rx = [args.core, args.plugins]
     for file in files:
-        if not path.exists(file):
+        if not Path(file).is_file():
             logger.error(f"Missing file : {file}")
             sys_exit(1)
         if not access(file, R_OK):
             logger.error(f"Can't read file : {file}")
             sys_exit(1)
-    for _path in paths_rx:
-        if not path.isdir(_path):
-            logger.error(f"Missing directory : {_path}")
+    for path in paths_rx:
+        if not Path(path).is_dir():
+            logger.error(f"Missing directory : {path}")
             sys_exit(1)
-        if not access(_path, R_OK | X_OK):
+        if not access(path, R_OK | X_OK):
             logger.error(
-                f"Missing RX rights on directory : {_path}",
+                f"Missing RX rights on directory : {path}",
             )
             sys_exit(1)
@@ -181,13 +187,12 @@ if __name__ == "__main__":
     files = glob(f"{args.core}/*/plugin.json")
     for file in files:
         try:
-            with open(file) as f:
-                core_plugin = loads(f.read())
+            core_plugin = loads(Path(file).read_text())

-                if core_plugin["order"] not in core_plugins:
-                    core_plugins[core_plugin["order"]] = []
+            if core_plugin["order"] not in core_plugins:
+                core_plugins[core_plugin["order"]] = []

-                core_plugins[core_plugin["order"]].append(core_plugin)
+            core_plugins[core_plugin["order"]].append(core_plugin)
         except:
             logger.error(
                 f"Exception while loading JSON from {file} : {format_exc()}",
@@ -218,7 +223,7 @@ if __name__ == "__main__":
         )
         config_files = config.get_config()
         custom_confs = [
-            {"value": v, "exploded": custom_confs_rx.search(k).groups()}
+            {"value": v, "exploded": custom_confs_rx.search(k).groups()}  # type: ignore
             for k, v in environ.items()
             if custom_confs_rx.match(k)
         ]
@@ -231,19 +236,18 @@ if __name__ == "__main__":
         ):
             path_exploded = root.split("/")
             for file in files:
-                with open(join(root, file), "r") as f:
-                    custom_confs.append(
-                        {
-                            "value": f.read(),
-                            "exploded": (
-                                f"{path_exploded.pop()}"
-                                if path_exploded[-1] not in root_dirs
-                                else "",
-                                path_exploded[-1],
-                                file.replace(".conf", ""),
-                            ),
-                        }
-                    )
+                custom_confs.append(
+                    {
+                        "value": Path(join(root, file)).read_text(),
+                        "exploded": (
+                            f"{path_exploded.pop()}"
+                            if path_exploded[-1] not in root_dirs
+                            else "",
+                            path_exploded[-1],
+                            file.replace(".conf", ""),
+                        ),
+                    }
+                )
     else:
         docker_client = DockerClient(
             base_url=getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
@@ -278,7 +282,7 @@ if __name__ == "__main__":
             else:
                 tmp_config[splitted[0]] = splitted[1]

-                if db is None and splitted[0] == "DATABASE_URI":
+                if not db and splitted[0] == "DATABASE_URI":
                     db = Database(
                         logger,
                         sqlalchemy_string=splitted[1],
@@ -295,11 +299,11 @@ if __name__ == "__main__":
                 )
             )

-    if db is None:
+    if not db:
         db = Database(logger)

     # Compute the config
-    if config_files is None:
+    if not config_files:
         logger.info("Computing config ...")
         config = Configurator(
             args.settings,
@@ -329,17 +333,17 @@ if __name__ == "__main__":
                 f"Exception while initializing database : {err}",
             )
             sys_exit(1)
-        elif ret is False:
+        elif not ret:
             logger.info(
                 "Database tables are already initialized, skipping creation ...",
             )
         else:
             logger.info("Database tables initialized")

-        with open("/usr/share/bunkerweb/VERSION", "r") as f:
-            version = f.read().strip()
-
-        err = db.initialize_db(version=version, integration=integration)
+        err = db.initialize_db(
+            version=Path("/usr/share/bunkerweb/VERSION").read_text().strip(),
+            integration=integration,
+        )

         if err:
             logger.error(
@@ -391,7 +395,7 @@ if __name__ == "__main__":
         if err:
             logger.warning(err)
     except SystemExit as e:
-        sys_exit(e)
+        raise e
     except:
         logger.error(
             f"Exception while executing config saver : {format_exc()}",
@@ -10,9 +10,10 @@ from stat import (
     S_IXOTH,
     S_IXUSR,
 )
+from typing import List


-def has_permissions(path, need_permissions):
+def has_permissions(path: str, need_permissions: List[str]):
     uid = geteuid()
     gid = getegid()
     statinfo = stat(path)
@@ -37,8 +38,7 @@ def has_permissions(path, need_permissions):
         permissions["W"] = True
     if statinfo.st_mode & S_IXOTH:
         permissions["X"] = True
-    list_permissions = [permission for permission in need_permissions]
-    for need_permission in list_permissions:
+    for need_permission in need_permissions:
         if not permissions[need_permission]:
             return False
     return True
@@ -1,6 +1,5 @@
 from io import BytesIO
 from os import environ, getenv
 from os.path import sep
 from sys import path as sys_path
 from tarfile import open as taropen
 from typing import Optional
@@ -1,4 +1,4 @@
-from json import loads
+from json import JSONDecodeError, load
 from glob import glob
 from re import match
 from traceback import format_exc
@@ -9,15 +9,19 @@ from logger import setup_logger
 class ConfigCaller:
     def __init__(self):
         self.__logger = setup_logger("Config", "INFO")
-        with open("/usr/share/bunkerweb/settings.json") as f:
-            self._settings = loads(f.read())
+        with open("/usr/share/bunkerweb/settings.json", "r") as f:
+            self._settings = load(f)
         for plugin in glob("/usr/share/bunkerweb/core/*/plugin.json") + glob(
             "/etc/bunkerweb/plugins/*/plugin.json"
         ):
-            with open(plugin) as f:
+            with open(plugin, "r") as f:
                 try:
-                    self._settings.update(loads(f.read())["settings"])
-                except:
+                    self._settings.update(load(f)["settings"])
+                except KeyError:
                     self.__logger.error(
                         f'Error while loading plugin metadata file at {plugin} : missing "settings" key',
                     )
+                except JSONDecodeError:
+                    self.__logger.error(
+                        f"Exception while loading plugin metadata file at {plugin} :\n{format_exc()}",
+                    )
@@ -26,24 +30,24 @@ class ConfigCaller:
         return setting in self._settings

     def _is_global_setting(self, setting):
-        if setting in self._settings:
+        if self._is_setting(setting):
             return self._settings[setting]["context"] == "global"
-        if match("^.+_\d+$", setting):
-            multiple_setting = "_".join(setting.split("_")[0:-1])
+        elif match(r"^.+_\d+$", setting):
+            multiple_setting = "_".join(setting.split("_")[:-1])
             return (
-                multiple_setting in self._settings
+                self._is_setting(multiple_setting)
                 and self._settings[multiple_setting]["context"] == "global"
                 and "multiple" in self._settings[multiple_setting]
             )
         return False

     def _is_multisite_setting(self, setting):
-        if setting in self._settings:
+        if self._is_setting(setting):
             return self._settings[setting]["context"] == "multisite"
-        if match("^.+_\d+$", setting):
+        if match(r"^.+_\d+$", setting):
             multiple_setting = "_".join(setting.split("_")[0:-1])
             return (
-                multiple_setting in self._settings
+                self._is_setting(multiple_setting)
                 and self._settings[multiple_setting]["context"] == "multisite"
                 and "multiple" in self._settings[multiple_setting]
             )
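Reviewer note (illustrative sketch, not part of the diff): the `^.+_\d+$` checks above implement numbered "multiple" settings, where something like REVERSE_PROXY_URL_2 falls back to the metadata of its base setting REVERSE_PROXY_URL. A sketch with a made-up settings table:

    from re import match

    settings = {
        "REVERSE_PROXY_URL": {"context": "multisite", "multiple": "yes"},
    }

    def is_multisite_setting(setting):
        if setting in settings:
            return settings[setting]["context"] == "multisite"
        if match(r"^.+_\d+$", setting):
            # Strip the numeric suffix to find the base setting.
            base = "_".join(setting.split("_")[:-1])
            return (
                base in settings
                and settings[base]["context"] == "multisite"
                and "multiple" in settings[base]
            )
        return False

    print(is_multisite_setting("REVERSE_PROXY_URL_2"))  # True
    print(is_multisite_setting("UNKNOWN_2"))            # False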
@@ -2,7 +2,7 @@ from contextlib import suppress
 from datetime import datetime
 from hashlib import sha512
 from json import dumps, loads
-from os import path, remove
+from pathlib import Path
 from shutil import copy
 from traceback import format_exc
@@ -18,13 +18,11 @@ from traceback import format_exc
 def is_cached_file(file, expire):
     is_cached = False
     try:
-        if not path.isfile(file):
-            return False
-        if not path.isfile(f"{file}.md"):
+        if not Path(f"{file}.md").is_file():
             return False
-        with open(f"{file}.md", "r") as f:
-            cached_time = loads(f.read())["date"]
-        current_time = datetime.timestamp(datetime.now())
+
+        cached_time = loads(Path(f"{file}.md").read_text())["date"]
+        current_time = datetime.now().timestamp()
         if current_time < cached_time:
             return False
         diff_time = current_time - cached_time
@@ -52,8 +50,7 @@ def file_hash(file):

 def cache_hash(cache):
     with suppress(BaseException):
-        with open(f"{cache}.md", "r") as f:
-            return loads(f.read())["checksum"]
+        return loads(Path(f"{cache}.md").read_text())["checksum"]
     return None
@@ -61,10 +58,9 @@ def cache_file(file, cache, _hash):
     ret, err = True, "success"
     try:
         copy(file, cache)
-        remove(file)
-        with open(f"{cache}.md", "w") as f:
-            md = {"date": datetime.timestamp(datetime.now()), "checksum": _hash}
-            f.write(dumps(md))
+        Path(file).unlink()
+        md = {"date": datetime.timestamp(datetime.now()), "checksum": _hash}
+        Path(cache).write_text(dumps(md))
     except:
        return False, f"exception :\n{format_exc()}"
     return ret, err
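Reviewer note (illustrative sketch, not part of the diff): is_cached_file and cache_hash above read a JSON sidecar file (`<file>.md`) holding a timestamp and checksum. A minimal sketch of writing and checking such a sidecar; the file names are invented:

    from datetime import datetime
    from json import dumps, loads
    from pathlib import Path

    def write_sidecar(cache, checksum):
        # Store the metadata next to the cached file, not inside it.
        md = {"date": datetime.now().timestamp(), "checksum": checksum}
        Path(f"{cache}.md").write_text(dumps(md))

    def is_fresh(cache, expire_seconds):
        sidecar = Path(f"{cache}.md")
        if not sidecar.is_file():
            return False
        cached_time = loads(sidecar.read_text())["date"]
        return datetime.now().timestamp() - cached_time < expire_seconds

    write_sidecar("/tmp/demo.cache", "abc123")
    print(is_fresh("/tmp/demo.cache", 3600))  # True right after writing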
@@ -3,6 +3,7 @@ from glob import glob
 from json import loads
 from logging import Logger
 from os import cpu_count, environ, getenv
+from pathlib import Path
 from subprocess import DEVNULL, PIPE, STDOUT, run
 from threading import Lock, Thread
 from schedule import (
@@ -48,13 +49,12 @@ class JobScheduler(ApiCaller):
             plugin_name = plugin.split("/")[-2]
             jobs[plugin_name] = []
             try:
-                with open(f"{plugin}/plugin.json") as f:
-                    plugin_data = loads(f.read())
-                    if not "jobs" in plugin_data:
-                        continue
-                    for job in plugin_data["jobs"]:
-                        job["path"] = plugin
-                    jobs[plugin_name] = plugin_data["jobs"]
+                plugin_data = loads(Path(f"{plugin}/plugin.json").read_text())
+                if not "jobs" in plugin_data:
+                    continue
+                for job in plugin_data["jobs"]:
+                    job["path"] = plugin
+                jobs[plugin_name] = plugin_data["jobs"]
             except FileNotFoundError:
                 pass
             except:
@@ -171,7 +171,7 @@ class JobScheduler(ApiCaller):
         success = False
         if reload:
             try:
-                if len(self._get_apis()) > 0:
+                if self._get_apis():
                     self.__logger.info("Sending /data/cache folder ...")
                     if not self._send_files("/data/cache", "/cache"):
                         success = False
@@ -22,14 +22,14 @@ from time import sleep
 from traceback import format_exc
 from typing import Any, Dict, List

-sys_path.extend(
-    (
-        "/usr/share/bunkerweb/deps/python",
-        "/usr/share/bunkerweb/utils",
-        "/usr/share/bunkerweb/api",
-        "/usr/share/bunkerweb/db",
-    )
-)
+if "/usr/share/bunkerweb/deps/python" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/deps/python")
+if "/usr/share/bunkerweb/utils" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/utils")
+if "/usr/share/bunkerweb/api" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/api")
+if "/usr/share/bunkerweb/db" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/db")

 from dotenv import dotenv_values
@@ -1,3 +1,21 @@
+from sys import path as sys_path, modules as sys_modules
+from pathlib import Path
+
+if Path("/etc/os-release").is_file():
+    with open("/etc/os-release", "r") as f:
+        if (
+            "/usr/share/bunkerweb/deps/python" not in sys_path
+            and "Alpine" not in f.read()
+        ):
+            sys_path.append("/usr/share/bunkerweb/deps/python")
+
+if "/usr/share/bunkerweb/utils" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/utils")
+if "/usr/share/bunkerweb/api" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/api")
+if "/usr/share/bunkerweb/db" not in sys_path:
+    sys_path.append("/usr/share/bunkerweb/db")
+
 from hashlib import sha256
 from bs4 import BeautifulSoup
 from contextlib import suppress
@@ -36,13 +54,11 @@ from kubernetes import client as kube_client
 from kubernetes.client.exceptions import ApiException as kube_ApiException
 from os import _exit, chmod, getenv, getpid, listdir, walk
 from os.path import join
-from pathlib import Path
 from re import match as re_match
 from requests import get
 from shutil import move, rmtree, copytree, chown
 from signal import SIGINT, signal, SIGTERM
 from subprocess import PIPE, Popen, call
-from sys import path as sys_path, modules as sys_modules
 from tarfile import CompressionError, HeaderError, ReadError, TarError, open as tar_open
 from threading import Thread
 from tempfile import NamedTemporaryFile
@@ -51,14 +67,6 @@ from traceback import format_exc
 from typing import Optional
 from zipfile import BadZipFile, ZipFile

-sys_path.extend(
-    (
-        "/usr/share/bunkerweb/utils",
-        "/usr/share/bunkerweb/api",
-        "/usr/share/bunkerweb/db",
-    )
-)
-
 from src.Instances import Instances
 from src.ConfigFiles import ConfigFiles
 from src.Config import Config