autoconf - fix wait and redis

This commit is contained in:
bunkerity 2021-08-02 16:37:50 +02:00
parent 5a26d06c87
commit 021147f9d9
No known key found for this signature in database
GPG Key ID: 3D80806F12602A7C
12 changed files with 66 additions and 36 deletions

View File

@@ -137,6 +137,7 @@ class Config :
if self.__lock :
self.__lock.acquire()
ret = True
nb = 0
urls = []
if self.__type == Controller.Type.SWARM :
for instance in instances :
@@ -158,9 +159,10 @@ class Config :
pass
if req and req.status_code == 200 and req.text == "ok" :
log("config", "INFO", "successfully sent API order to " + url)
nb += 1
else :
log("config", "INFO", "failed API order to " + url)
ret = False
if self.__lock :
self.__lock.release()
return ret
return ret and nb > 0

View File

@@ -1,4 +1,4 @@
import docker
import docker, time
import Controller
from logger import log
@@ -58,5 +58,14 @@ class DockerController(Controller.Controller) :
def wait(self) :
# TODO : healthcheck ?
return True
# Wait for a container
instances = self.__get_instances()
while len(instances) == 0 :
time.sleep(1)
instances = self.__get_instances()
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
return False, env
# Wait for nginx
return self._config.wait(instances), env

View File

@@ -1,4 +1,4 @@
import docker
import docker, time
from threading import Lock
from logger import log
@@ -59,4 +59,14 @@ class SwarmController(Controller.Controller) :
return self._reload(self.__get_instances())
def wait(self) :
return self._config.wait(self.__get_instances())
# Wait for a service
instances = self.__get_instances()
while len(instances) == 0 :
time.sleep(1)
instances = self.__get_instances()
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
return False, env
# Wait for nginx
return self._config.wait(instances), env

View File

@@ -32,21 +32,14 @@ if swarm or kubernetes :
log("autoconf", "INFO", "start reload server in background")
(server, thread) = run_reload_server(controller)
# Apply the first config for existing services
current_env = controller.get_env()
if current_env != {} :
log("autoconf", "INFO", "generating the initial configuration...")
if controller.gen_conf(current_env) :
log("autoconf", "INFO", "initial configuration successfully generated")
else :
log("autoconf", "ERROR", "error while generating initial configuration")
# Wait for instances
if controller.wait() :
log("autoconf", "INFO", "wait until a bunkerized-nginx instance is started ...")
ret, env = controller.wait()
if ret :
log("autoconf", "INFO", "bunkerized-nginx instances started")
else :
log("autoconf", "ERROR", "bunkerized-nginx instances not started")
# Process events
log("autoconf", "INFO", "waiting for events ...")
controller.process_events(current_env)
controller.process_events(env)

View File

@@ -10,7 +10,8 @@ class Abusers(Job) :
filename = "abusers.list"
type = "line"
regex = r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/?[0-9]*$"
super().__init__(name, data, filename, redis_host=redis_host, type=type, regex=regex, copy_cache=copy_cache)
redis_ex = 86400
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
if self._redis != None :

View File

@@ -10,7 +10,8 @@ class ExitNodes(Job) :
filename = "tor-exit-nodes.list"
type = "line"
regex = r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/?[0-9]*$"
super().__init__(name, data, filename, redis_host=redis_host, type=type, regex=regex, copy_cache=copy_cache)
redis_ex = 3600
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
if self._redis != None :

View File

@@ -61,7 +61,7 @@ class JobManagement() :
class Job(abc.ABC) :
def __init__(self, name, data, filename=None, redis_host=None, type="line", regex=r"^.+$", copy_cache=False) :
def __init__(self, name, data, filename=None, redis_host=None, redis_ex=86400, type="line", regex=r"^.+$", copy_cache=False) :
self._name = name
self._data = data
self._filename = filename
@@ -72,6 +72,7 @@ class Job(abc.ABC) :
self._redis.echo("test")
except :
log(self._name, "ERROR", "can't connect to redis host " + redis_host)
self._redis_ex = redis_ex
self._type = type
self._regex = regex
self._copy_cache = copy_cache
@@ -119,9 +120,9 @@ class Job(abc.ABC) :
else :
if self._type == "line" :
for chunk in chunks :
pipe.set(self._name + "_" + chunk, "1")
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex)
else :
pipe.set(self._name + "_" + chunk, "1")
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex)
count += 1
if self._redis == None :
@@ -132,7 +133,6 @@ class Job(abc.ABC) :
return JobRet.OK_RELOAD
elif self._redis != None and count > 0 :
self._redis.delete(self._redis.keys(self._name + "_*"))
pipe.execute()
return JobRet.OK_RELOAD
@@ -178,7 +178,6 @@ class Job(abc.ABC) :
return JobRet.OK_NO_RELOAD
if self._redis != None and self._type == "line" :
self._redis.delete(self._redis.keys(self._name + "_*"))
with open("/opt/bunkerized-nginx/cache/" + self._filename) as f :
pipe = self._redis.pipeline()
while True :
@@ -186,7 +185,7 @@ class Job(abc.ABC) :
if not line :
break
line = line.strip()
pipe.set(self._name + "_" + line, "1")
pipe.set(self._name + "_" + line, "1", ex=self._redis_ex)
pipe.execute()
return JobRet.OK_NO_RELOAD

View File

@@ -10,7 +10,8 @@ class Proxies(Job) :
filename = "proxies.list"
type = "line"
regex = r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/?[0-9]*$"
super().__init__(name, data, filename, redis_host=redis_host, type=type, regex=regex, copy_cache=copy_cache)
redis_ex = 86400
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
if self._redis != None :

View File

@@ -8,7 +8,8 @@ class Referrers(Job) :
filename = "referrers.list"
type = "line"
regex = r"^.+$"
super().__init__(name, data, filename, redis_host=redis_host, type=type, regex=regex, copy_cache=copy_cache)
redis_ex = 86400
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
return [chunk.replace(b".", b"%.").replace(b"-", b"%-")]

View File

@@ -8,7 +8,8 @@ class UserAgents(Job) :
filename = "user-agents.list"
type = "line"
regex = r"^.+$"
super().__init__(name, data, filename, redis_host=redis_host, type=type, regex=regex, copy_cache=copy_cache)
redis_ex = 86400
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
return [chunk.replace(b"\\ ", b" ").replace(b"\\.", b"%.").replace(b"\\\\", b"\\").replace(b"-", b"%-")]

View File

@@ -1,6 +1,6 @@
#!/usr/bin/python3
import argparse, sys
import argparse, sys, re
sys.path.append("/opt/bunkerized-nginx/jobs")
@@ -26,7 +26,6 @@ if __name__ == "__main__" :
# Parse arguments
parser = argparse.ArgumentParser(description="job runner for bunkerized-nginx")
parser.add_argument("--name", default="", type=str, help="job to run (e.g : abusers or certbot-new or certbot-renew ...)")
parser.add_argument("--redis", default=None, type=str, help="hostname of the redis server if any")
parser.add_argument("--cache", action="store_true", help="copy data from cache if available")
parser.add_argument("--reload", action="store_true", help="reload nginx if necessary and the job is successful")
parser.add_argument("--domain", default="", type=str, help="domain(s) for certbot-new job (e.g. : www.example.com or app1.example.com,app2.example.com)")
@@ -48,15 +47,27 @@ if __name__ == "__main__" :
management = JobManagement()
management.lock()
# Check if we are using redis or not
redis_host = None
try :
with open("/etc/nginx/global.env", "r") as f :
data = f.read()
if re.search(r"^USE_REDIS=yes$", data, re.MULTILINE) :
re_match = re.search(r"^REDIS_HOST=(.+)$", data, re.MULTILINE)
if re_match :
redis_host = re_match.group(1)
except :
log("job", "ERROR", "can't check if redis is used")
# Run job
log("job", "INFO", "executing job " + job)
ret = 0
if job == "certbot-new" :
instance = JOBS[job](redis_host=args.redis, copy_cache=args.cache, domain=args.domain, email=args.email, staging=args.staging)
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, domain=args.domain, email=args.email, staging=args.staging)
elif job == "self-signed-cert" :
instance = JOBS[job](redis_host=args.redis, copy_cache=args.cache, dst_cert=args.dst_cert, dst_key=args.dst_key, expiry=args.expiry, subj=args.subj)
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, dst_cert=args.dst_cert, dst_key=args.dst_key, expiry=args.expiry, subj=args.subj)
else :
instance = JOBS[job](redis_host=args.redis, copy_cache=args.cache)
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache)
ret = instance.run()
if ret == JobRet.KO :
log("job", "ERROR", "error while running job " + job)

View File

@@ -14,7 +14,7 @@ end
function M.check(self, data)
-- without redis
if self.__data_dict ~= nil and self.__redis_client == nil then
if self.__redis_client == nil then
if self.__type == "simple" then
local value, flags = self.__data_dict:get(data)
return value ~= nil
@@ -28,7 +28,7 @@ function M.check(self, data)
end
-- with redis
elseif self.__data_dict == nil and self.__redis_client ~= nil then
else
if self.__type == "simple" then
local res, err = self.__redis_client:get(self.__name .. "_" .. data)
return res and res ~= ngx.null
@@ -36,7 +36,8 @@ function M.check(self, data)
local patterns = self.__redis_client:keys(self.__name .. "_*")
if patterns then
for i, pattern in ipairs(patterns) do
if string.match(data, pattern) then
local real_pattern = string.gsub(pattern, self.__name:gsub("%-", "%%-") .. "_", "", 1)
if string.match(data, real_pattern) then
return true
end
end