autoconf - init work on swarm mode
This commit is contained in:
parent
e12b656bd5
commit
e04c783d1e
|
@ -16,10 +16,7 @@ COPY lua/ /opt/lua
|
|||
COPY prepare.sh /tmp/prepare.sh
|
||||
RUN chmod +x /tmp/prepare.sh && /tmp/prepare.sh && rm -f /tmp/prepare.sh
|
||||
|
||||
# Fix CVE-2020-28928, CVE-2020-8231 & CVE-2020-1971
|
||||
RUN apk --no-cache add "musl-utils>1.1.24-r2" "curl>7.67.0-r1" "libcrypto1.1>1.1.1g-r0" "libssl1.1>1.1.1g-r0"
|
||||
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs
|
||||
|
||||
EXPOSE 8080/tcp 8443/tcp
|
||||
|
||||
|
|
|
@ -16,10 +16,7 @@ COPY lua/ /opt/lua
|
|||
COPY prepare.sh /tmp/prepare.sh
|
||||
RUN chmod +x /tmp/prepare.sh && /tmp/prepare.sh && rm -f /tmp/prepare.sh
|
||||
|
||||
# Fix CVE-2020-28928, CVE-2020-8231 & CVE-2020-1971
|
||||
RUN apk --no-cache add "musl-utils>1.1.24-r2" "curl>7.67.0-r1" "libcrypto1.1>1.1.1g-r0" "libssl1.1>1.1.1g-r0"
|
||||
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs
|
||||
|
||||
EXPOSE 8080/tcp 8443/tcp
|
||||
|
||||
|
|
|
@ -23,10 +23,7 @@ COPY lua/ /opt/lua
|
|||
COPY prepare.sh /tmp/prepare.sh
|
||||
RUN chmod +x /tmp/prepare.sh && /tmp/prepare.sh && rm -f /tmp/prepare.sh
|
||||
|
||||
# Fix CVE-2020-28928, CVE-2020-8231 & CVE-2020-1971
|
||||
RUN apk --no-cache add "musl-utils>1.1.24-r2" "curl>7.67.0-r1" "libcrypto1.1>1.1.1g-r0" "libssl1.1>1.1.1g-r0"
|
||||
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs
|
||||
|
||||
EXPOSE 8080/tcp 8443/tcp
|
||||
|
||||
|
|
|
@ -23,10 +23,7 @@ COPY lua/ /opt/lua
|
|||
COPY prepare.sh /tmp/prepare.sh
|
||||
RUN chmod +x /tmp/prepare.sh && /tmp/prepare.sh && rm -f /tmp/prepare.sh
|
||||
|
||||
# Fix CVE-2020-28928, CVE-2020-8231 & CVE-2020-1971
|
||||
RUN apk --no-cache add "musl-utils>1.1.24-r2" "curl>7.67.0-r1" "libcrypto1.1>1.1.1g-r0" "libssl1.1>1.1.1g-r0"
|
||||
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs
|
||||
|
||||
EXPOSE 8080/tcp 8443/tcp
|
||||
|
||||
|
|
|
@ -16,10 +16,7 @@ COPY lua/ /opt/lua
|
|||
COPY prepare.sh /tmp/prepare.sh
|
||||
RUN chmod +x /tmp/prepare.sh && /tmp/prepare.sh && rm -f /tmp/prepare.sh
|
||||
|
||||
# Fix CVE-2020-28928, CVE-2020-8231 & CVE-2020-1971
|
||||
RUN apk --no-cache add "musl-utils>1.1.24-r2" "curl>7.67.0-r1" "libcrypto1.1>1.1.1g-r0" "libssl1.1>1.1.1g-r0"
|
||||
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache
|
||||
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs
|
||||
|
||||
EXPOSE 8080/tcp 8443/tcp
|
||||
|
||||
|
|
|
@ -0,0 +1,104 @@
|
|||
import utils

from Config import Config
|
||||
|
||||
class AutoConf :
    """Keep the bunkerized-nginx configuration in sync with Docker events.

    Tracks two kinds of objects, identified by their labels :
    - "bunkerized-nginx.AUTOCONF"    : a bunkerized-nginx instance
    - "bunkerized-nginx.SERVER_NAME" : a web service to protect
    Works against plain containers or Swarm services depending on *swarm*.
    """

    def __init__(self, swarm) :
        # True when running against Docker Swarm services, False for containers
        self.__swarm = swarm
        # id -> container/service acting as a bunkerized-nginx instance
        self.__instances = {}
        # id -> container/service acting as a protected web service
        # (fix : was declared as self.__sites but every method uses __servers)
        self.__servers = {}
        self.__config = Config(self.__swarm)

    def pre_process(self, objs) :
        """Replay "create"/"start" events for objects that existed before we
        started listening. Instances are processed first so that servers can
        rely on them when generating their configuration."""
        for instance in objs :
            (id, name, labels) = self.__get_infos(instance)
            if "bunkerized-nginx.AUTOCONF" in labels :
                if self.__swarm :
                    # Swarm services have no status attribute
                    self.__process_instance(instance, "create", id, name, labels)
                else :
                    if instance.status in ("restarting", "running", "created", "exited") :
                        self.__process_instance(instance, "create", id, name, labels)
                    if instance.status == "running" :
                        self.__process_instance(instance, "start", id, name, labels)

        for server in objs :
            (id, name, labels) = self.__get_infos(server)
            if "bunkerized-nginx.SERVER_NAME" in labels :
                if self.__swarm :
                    self.__process_server(server, "create", id, name, labels)
                else :
                    if server.status in ("restarting", "running", "created", "exited") :
                        self.__process_server(server, "create", id, name, labels)
                    if server.status == "running" :
                        self.__process_server(server, "start", id, name, labels)

    def process(self, obj, event) :
        """Route a live Docker event to the instance or server handler."""
        (id, name, labels) = self.__get_infos(obj)
        if "bunkerized-nginx.AUTOCONF" in labels :
            self.__process_instance(obj, event, id, name, labels)
        elif "bunkerized-nginx.SERVER_NAME" in labels :
            self.__process_server(obj, event, id, name, labels)

    def __get_infos(self, obj) :
        """Return (id, name, labels) for a docker-py Container or Service.

        Fix : the original swarm branch read obj["Actor"]["ID"] /
        obj["Actor"]["Attributes"]["name"] as if *obj* were an event dict,
        but both call sites pass docker-py objects (services.list()/get()),
        which expose .id and .name directly."""
        if self.__swarm :
            id = obj.id
            name = obj.name
            labels = obj.attrs["Spec"]["Labels"]
        else :
            id = obj.id
            name = obj.name
            labels = obj.labels
        return (id, name, labels)

    def __process_instance(self, instance, event, id, name, labels) :
        """Track lifecycle events of a bunkerized-nginx instance."""
        if event == "create" :
            # fix : original assigned the undefined name "obj" (NameError)
            self.__instances[id] = instance
            utils.log("[*] bunkerized-nginx instance created : " + name + " / " + id)
        elif event == "start" :
            # refresh cached state from the Docker API
            self.__instances[id].reload()
            utils.log("[*] bunkerized-nginx instance started : " + name + " / " + id)
        elif event == "die" :
            self.__instances[id].reload()
            utils.log("[*] bunkerized-nginx instance stopped : " + name + " / " + id)
        elif event == "destroy" or event == "remove" :
            del self.__instances[id]
            utils.log("[*] bunkerized-nginx instance removed : " + name + " / " + id)

    def __process_server(self, instance, event, id, name, labels) :
        """Generate/activate/deactivate/remove the site config of a web
        service according to its lifecycle event."""
        # Convert bunkerized-nginx.* labels to env-style vars
        vars = { k.replace("bunkerized-nginx.", "", 1) : v for k, v in labels.items() if k.startswith("bunkerized-nginx.")}
        if event == "create" :
            # fix : "instances" was an undefined name, use self.__instances
            if self.__config.generate(self.__instances, vars) :
                utils.log("[*] Generated config for " + vars["SERVER_NAME"])
                # fix : original assigned the undefined name "obj"
                self.__servers[id] = instance
                if self.__swarm :
                    # Swarm services get no separate "start" event : activate now
                    if self.__config.activate(self.__instances, vars) :
                        utils.log("[*] Activated config for " + vars["SERVER_NAME"])
                    else :
                        utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
            else :
                utils.log("[!] Can't generate config for " + vars["SERVER_NAME"])
        elif event == "start" :
            if id in self.__servers :
                self.__servers[id].reload()
                if self.__config.activate(self.__instances, vars) :
                    utils.log("[*] Activated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
        elif event == "die" :
            if id in self.__servers :
                self.__servers[id].reload()
                if self.__config.deactivate(self.__instances, vars) :
                    utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])
        elif event == "destroy" or event == "remove" :
            if id in self.__servers :
                if self.__swarm :
                    if self.__config.deactivate(self.__instances, vars) :
                        utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
                    else :
                        # fix : original line ended with a stray "+" (syntax error)
                        utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])
                del self.__servers[id]
                if self.__config.remove(vars) :
                    utils.log("[*] Removed config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't remove config for " + vars["SERVER_NAME"])
|
|
@ -0,0 +1,101 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import utils
|
||||
import subprocess, shutil, os, traceback
|
||||
|
||||
class Config :
    """Generate, activate, deactivate and remove per-site nginx
    configurations for bunkerized-nginx instances (plain containers or
    Swarm services, selected by *swarm*)."""

    def __init__(self, swarm) :
        # True when instances are Swarm services, False for containers
        self.__swarm = swarm

    def generate(self, instances, vars) :
        """Generate the site config for vars["SERVER_NAME"].

        instances : dict id -> container/service whose env vars are used as
        defaults ; values in *vars* (from labels) take precedence.
        Returns True on success, False otherwise."""
        try :
            # Get env vars from bunkerized-nginx instances
            vars_instances = {}
            for instance_id, instance in instances.items() :
                if self.__swarm :
                    env = instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
                else :
                    env = instance.attrs["Config"]["Env"]
                for var_value in env :
                    var = var_value.split("=")[0]
                    value = var_value.replace(var + "=", "", 1)
                    vars_instances[var] = value
            # Later updates win : labels (vars) override instance env vars
            vars_defaults = vars.copy()
            vars_defaults.update(vars_instances)
            vars_defaults.update(vars)
            # Call site-config.sh to generate the config
            proc = subprocess.run(["/opt/entrypoint/site-config.sh", vars["SERVER_NAME"]], env=vars_defaults, capture_output=True)
            if proc.returncode == 0 :
                return True
        except Exception as e :
            traceback.print_exc()
            utils.log("[!] Error while generating config : " + str(e))
        # Script returned non-zero or an exception occurred
        return False

    def activate(self, instances, vars) :
        """Include the generated server.conf in nginx.conf and reload all
        instances. Returns True on success."""
        try :
            # Check if file exists
            if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
                utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
                return False

            # Include the server conf
            utils.replace_in_file("/etc/nginx/nginx.conf", "}", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n}")

            return self.__reload(instances)
        except Exception as e :
            utils.log("[!] Error while activating config : " + str(e))
            return False

    def deactivate(self, instances, vars) :
        """Remove the include of server.conf from nginx.conf and reload all
        instances. Returns True on success."""
        try :
            # Check if file exists
            if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
                utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
                return False

            # Remove the include
            utils.replace_in_file("/etc/nginx/nginx.conf", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n", "")

            return self.__reload(instances)

        except Exception as e :
            utils.log("[!] Error while deactivating config : " + str(e))
            return False

    def remove(self, vars) :
        """Delete the generated config folder for vars["SERVER_NAME"].

        Fix : original signature was remove(self, instances, vars) but the
        only caller (AutoConf) does self.__config.remove(vars) and
        *instances* was never used — the old signature raised TypeError."""
        try :
            # Check if file exists
            if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
                utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
                return False

            # Remove the folder
            shutil.rmtree("/etc/nginx/" + vars["SERVER_NAME"])
            return True
        except Exception as e :
            # fix : message said "deactivating" instead of "removing"
            utils.log("[!] Error while removing config : " + str(e))
            return False

    def __reload(self, instances) :
        """Ask every running instance to reload its configuration.
        Returns False if at least one reload failed, True otherwise."""
        # Send SIGHUP to all running instances
        ret = True
        for instance_id, instance in instances.items() :
            if self.__swarm :
                # TODO : send POST requests on http://service-name:8000/reload ?
                #name = instance.attrs["Spec"]["Attrs"]
                #req = requests.post("http://" + name + ":8000/reload")
                #if req and req.status_code == 200 :
                #	utils.log("[*] ")
                #else :
                #	...
                # fix : original branch contained only comments, which is a
                # syntax error ; no swarm reload mechanism exists yet
                pass
            elif instance.status == "running" :
                try :
                    instance.kill("SIGHUP")
                    utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + instance.name + " / " + instance.id)
                except Exception as e :
                    # fix : "docker" is not imported in this module, so the
                    # original "except docker.errors.APIError" raised
                    # NameError as soon as kill() failed
                    utils.log("[!] Docker error while sending SIGHUP signal : " + str(e))
                    ret = False
        return ret
|
|
@ -1,85 +0,0 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import utils
|
||||
import subprocess, shutil, os, traceback
|
||||
|
||||
def generate(instances, vars) :
    """Generate the site config for vars["SERVER_NAME"].

    instances : dict id -> docker container whose env vars serve as defaults.
    Returns True when site-config.sh exits 0 ; falls through otherwise.
    NOTE(review) : legacy module-level version, replaced by Config.generate.
    """
    try :
        # Get env vars from bunkerized-nginx instances
        vars_instances = {}
        for instance_id, instance in instances.items() :
            for var_value in instance.attrs["Config"]["Env"] :
                var = var_value.split("=")[0]
                value = var_value.replace(var + "=", "", 1)
                vars_instances[var] = value
        # Later updates win : labels (vars) override instance env vars
        vars_defaults = vars.copy()
        vars_defaults.update(vars_instances)
        vars_defaults.update(vars)
        # Call site-config.sh to generate the config
        proc = subprocess.run(["/opt/entrypoint/site-config.sh", vars["SERVER_NAME"]], env=vars_defaults, capture_output=True)
        if proc.returncode == 0 :
            return True
    except Exception as e :
        traceback.print_exc()
        utils.log("[!] Error while generating config : " + str(e))
    # Non-zero exit code or exception : report failure
    return False
|
||||
|
||||
def activate(instances, vars) :
    """Enable a previously generated site config : include its server.conf
    in nginx.conf, then SIGHUP every running instance.
    Returns True on success, False otherwise."""
    try :
        # Check if file exists
        if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
            utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
            return False

        # Include the server conf
        # NOTE(review) : swaps a bare "}" in nginx.conf for include + "}" —
        # verify replace_in_file semantics if nginx.conf has several braces
        utils.replace_in_file("/etc/nginx/nginx.conf", "}", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n}")

        # Send SIGHUP to all running instances
        for instance_id, instance in instances.items() :
            if instance.status == "running" :
                try :
                    instance.kill("SIGHUP")
                    utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + instance.name + " / " + instance.id)
                except docker.errors.APIError as e :
                    # NOTE(review) : "docker" is not imported in this module —
                    # reaching this handler raises NameError ; confirm import
                    utils.log("[!] Docker error while sending SIGHUP signal : " + str(e))
        return True
    except Exception as e :
        utils.log("[!] Error while activating config : " + str(e))
        return False
|
||||
|
||||
def deactivate(instances, vars) :
    """Disable a site config : drop its include from nginx.conf, then SIGHUP
    every running instance. Returns True on success, False otherwise."""
    try :
        # Check if file exists
        if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
            utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
            return False

        # Remove the include
        utils.replace_in_file("/etc/nginx/nginx.conf", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n", "")

        # Send SIGHUP to all running instances
        for instance_id, instance in instances.items() :
            if instance.status == "running" :
                try :
                    instance.kill("SIGHUP")
                    utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + instance.name + " / " + instance.id)
                except docker.errors.APIError as e :
                    # NOTE(review) : "docker" is not imported in this module —
                    # reaching this handler raises NameError ; confirm import
                    utils.log("[!] Docker error while sending SIGHUP signal : " + str(e))
        return True
    except Exception as e :
        utils.log("[!] Error while deactivating config : " + str(e))
        return False
|
||||
|
||||
def remove(vars) :
    """Delete the generated config folder for vars["SERVER_NAME"].

    Fix : original signature was remove(instances, vars) but the only caller
    does config.remove(vars) — a TypeError — and *instances* was unused.
    Returns True on success, False otherwise."""
    try :
        # Check if file exists
        if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf") :
            utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
            return False

        # Remove the folder
        shutil.rmtree("/etc/nginx/" + vars["SERVER_NAME"])
        return True
    except Exception as e :
        # fix : message said "deactivating" instead of "removing"
        utils.log("[!] Error while removing config : " + str(e))
        return False
|
|
@ -1,58 +1,9 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
from AutoConf import AutoConf
|
||||
import utils, config
|
||||
import docker, os, stat, sys
|
||||
|
||||
def process(container, event) :
    """Dispatch a Docker container *event* ("create"/"start"/"die"/"destroy")
    for either a bunkerized-nginx instance (label bunkerized-nginx.AUTOCONF)
    or a protected web service (label bunkerized-nginx.SERVER_NAME).
    NOTE(review) : legacy dispatcher, superseded by AutoConf.process."""
    # Module-level registries of known instances and protected containers
    global instances, containers

    # Process instance event
    if "bunkerized-nginx.AUTOCONF" in container.labels :
        if event == "create" :
            instances[container.id] = container
            utils.log("[*] bunkerized-nginx instance created : " + container.name + " / " + container.id)
        elif event == "start" :
            # refresh cached state from the Docker API
            instances[container.id].reload()
            utils.log("[*] bunkerized-nginx instance started : " + container.name + " / " + container.id)
        elif event == "die" :
            instances[container.id].reload()
            utils.log("[*] bunkerized-nginx instance stopped : " + container.name + " / " + container.id)
        elif event == "destroy" :
            del instances[container.id]
            utils.log("[*] bunkerized-nginx instance removed : " + container.name + " / " + container.id)

    # Process container event
    elif "bunkerized-nginx.SERVER_NAME" in container.labels :
        # Convert labels to env vars
        vars = { k.replace("bunkerized-nginx.", "", 1) : v for k, v in container.labels.items() if k.startswith("bunkerized-nginx.")}
        if event == "create" :
            if config.generate(instances, vars) :
                utils.log("[*] Generated config for " + vars["SERVER_NAME"])
                containers[container.id] = container
            else :
                utils.log("[!] Can't generate config for " + vars["SERVER_NAME"])
        elif event == "start" :
            if container.id in containers :
                containers[container.id].reload()
                if config.activate(instances, vars) :
                    utils.log("[*] Activated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
        elif event == "die" :
            if container.id in containers :
                containers[container.id].reload()
                if config.deactivate(instances, vars) :
                    utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])
        elif event == "destroy" :
            if container.id in containers :
                del containers[container.id]
                # NOTE(review) : config.remove is defined as
                # remove(instances, vars) but is called here with one
                # argument — this raises TypeError ; confirm intended
                # signature
                if config.remove(vars) :
                    utils.log("[*] Removed config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't remove config for " + vars["SERVER_NAME"])
|
||||
|
||||
# Connect to the endpoint
|
||||
endpoint = "/var/run/docker.sock"
|
||||
if not os.path.exists(endpoint) or not stat.S_ISSOCK(os.stat(endpoint).st_mode) :
|
||||
|
@ -64,54 +15,44 @@ except Exception as e :
|
|||
utils.log("[!] Can't instantiate DockerClient : " + str(e))
|
||||
sys.exit(2)
|
||||
|
||||
# Check if we are in Swarm mode
|
||||
swarm = os.getenv("SWARM_MODE") == "yes"
|
||||
|
||||
# Our object to process events
|
||||
autoconf = AutoConf(swarm)
|
||||
|
||||
# Get all bunkerized-nginx instances and web services created before
|
||||
instances = {}
|
||||
containers = {}
|
||||
try :
|
||||
before = client.containers.list(all=True, filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.containers.list(all=True, filters={"label" : "bunkerized-nginx.SERVER_NAME"})
|
||||
if swarm :
|
||||
before = client.services.list(filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.services.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
|
||||
else :
|
||||
before = client.containers.list(all=True, filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
|
||||
except docker.errors.APIError as e :
|
||||
utils.log("[!] Docker API error " + str(e))
|
||||
sys.exit(3)
|
||||
# Process instances first
|
||||
for instance in before :
|
||||
if "bunkerized-nginx.AUTOCONF" in instance.labels :
|
||||
if instance.status in ("restarting", "running", "created", "exited") :
|
||||
process(instance, "create")
|
||||
if instance.status == "running" :
|
||||
process(instance, "start")
|
||||
# Containers after
|
||||
for container in before :
|
||||
if "bunkerized-nginx.SERVER_NAME" in container.labels :
|
||||
if container.status in ("restarting", "running", "created", "exited") :
|
||||
process(container, "create")
|
||||
if container.status == "running" :
|
||||
process(container, "start")
|
||||
|
||||
# Process them before events
|
||||
autoconf.pre_process(before)
|
||||
|
||||
# Process events received from Docker
|
||||
try :
|
||||
for event in client.events(decode=True) :
|
||||
|
||||
# Process only container events
|
||||
if event["Type"] != "container" :
|
||||
# Process only container/service events
|
||||
if (swarm and event["Type"] != "service") or (not swarm and event["Type"] != "container") :
|
||||
continue
|
||||
|
||||
# Get Container object
|
||||
# Get Container/Service object
|
||||
try :
|
||||
container = client.containers.get(event["id"])
|
||||
if swarm :
|
||||
server = client.services.get(service_id=event["Actor"]["ID"])
|
||||
else :
|
||||
server = client.containers.get(event["id"])
|
||||
except docker.errors.NotFound as e :
|
||||
continue
|
||||
|
||||
# Check if there is an interesting label
|
||||
interesting = False
|
||||
for label in container.labels :
|
||||
if label in ("bunkerized-nginx.SERVER_NAME", "bunkerized-nginx.AUTOCONF") :
|
||||
interesting = True
|
||||
break
|
||||
if not interesting :
|
||||
continue
|
||||
|
||||
# Process the event
|
||||
process(container, event["Action"])
|
||||
autoconf.process(server, event["Action"])
|
||||
|
||||
except docker.errors.APIError as e :
|
||||
utils.log("[!] Docker API error " + str(e))
|
||||
|
|
Loading…
Reference in New Issue