Use annotations as env vars in the Ingress definition, fix CIDR parsing for reserved IPs, fix missing "empty" marker when the job is external, fix the ping check for the remote API, and add initial hour/day unit support for the request limit
This commit is contained in:
parent
4e45fa3874
commit
4c77a14825
|
@ -41,7 +41,9 @@ class IngressController(Controller.Controller) :
|
|||
|
||||
def __annotations_to_env(self, annotations) :
|
||||
env = {}
|
||||
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
|
||||
prefix = ""
|
||||
if "bunkerized-nginx.SERVER_NAME" in annotations :
|
||||
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
|
||||
for annotation in annotations :
|
||||
if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" :
|
||||
env[prefix + annotation.replace("bunkerized-nginx.", "", 1)] = annotations[annotation]
|
||||
|
@ -85,6 +87,8 @@ class IngressController(Controller.Controller) :
|
|||
first_servers.extend(env["SERVER_NAME"].split(" "))
|
||||
for ingress in ingresses :
|
||||
env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace))
|
||||
if ingress.metadata.annotations != None :
|
||||
env.update(self.__annotations_to_env(ingress.metadata.annotations))
|
||||
if ingress.spec.tls :
|
||||
for tls_entry in ingress.spec.tls :
|
||||
for host in tls_entry.hosts :
|
||||
|
|
|
@ -37,7 +37,10 @@ local reserved_ips = {
|
|||
"240.0.0.0/4",
|
||||
"255.255.255.255/32"
|
||||
}
|
||||
ngx.shared.reserved_ips:safe_set("cidrs", iputils.parse_cidrs(reserved_ips), 0)
|
||||
local success, err, forcible = ngx.shared.reserved_ips:set("data", cjson.encode(iputils.parse_cidrs(reserved_ips)), 0)
|
||||
if not success then
|
||||
logger.log(ngx.ERR, "INIT", "Can't load reserved IPs : " .. err)
|
||||
end
|
||||
|
||||
-- Load blacklists
|
||||
if not use_redis then
|
||||
|
|
|
@ -90,13 +90,14 @@ http {
|
|||
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%}
|
||||
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%}
|
||||
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%}
|
||||
{% if has_value("USE_LIMIT_REQ", "yes") %}lua_shared_dict limit_req {{ LIMIT_REQ_CACHE }};{% endif +%}
|
||||
lua_shared_dict plugins_data 10m;
|
||||
lua_shared_dict reserved_ips 1m;
|
||||
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api 1m;{% endif +%}
|
||||
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api_db 10m;{% endif +%}
|
||||
|
||||
# shared memory zone for limit_req
|
||||
{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
|
||||
#{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
|
||||
|
||||
# shared memory zone for limit_conn
|
||||
{% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
log_by_lua_block {
|
||||
|
||||
local logger = require "logger"
|
||||
local cjson = require "cjson"
|
||||
|
||||
-- bad behavior
|
||||
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
|
||||
|
@ -22,7 +23,7 @@ local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}
|
|||
local remoteapi = require "remoteapi"
|
||||
local iputils = require "resty.iputils"
|
||||
|
||||
if use_remote_api and not iputils.ip_in_cidrs(ngx.var.remote_addr, ngx.shared.reserved_ips:get("data")) and ngx.shared.remote_api:get("id") ~= "empty" and ngx.shared.remote_api:get("ping") ~= "ko" then
|
||||
if use_remote_api and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) and ngx.shared.remote_api:get("id") ~= "empty" and ngx.shared.remote_api:get("ping") ~= "ko" then
|
||||
if ngx.status == ngx.HTTP_FORBIDDEN then
|
||||
local reason = "other"
|
||||
if use_bad_behavior and new_bad_behavior_ban then
|
||||
|
|
|
@ -57,6 +57,11 @@ local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elemen
|
|||
-- bad behavior
|
||||
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
|
||||
|
||||
-- limit req
|
||||
local use_req_limit = {% if USE_REQ_LIMIT == "yes" %}true{% else %}false{% endif +%}
|
||||
local limit_req_rate = "{{ LIMIT_REQ_RATE }}"
|
||||
local limit_req_burst = "{{ LIMIT_REQ_BURST }}"
|
||||
|
||||
-- remote API
|
||||
local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%}
|
||||
|
||||
|
@ -73,6 +78,7 @@ local behavior = require "behavior"
|
|||
local logger = require "logger"
|
||||
local redis = require "resty.redis"
|
||||
local checker = require "checker"
|
||||
local limitreq = require "limitreq"
|
||||
|
||||
-- user variables
|
||||
local antibot_uri = "{{ ANTIBOT_URI }}"
|
||||
|
@ -148,6 +154,11 @@ if use_bad_behavior and behavior.is_banned() then
|
|||
ngx.exit(ngx.HTTP_FORBIDDEN)
|
||||
end
|
||||
|
||||
-- check if IP is banned because of "request limit"
|
||||
-- if use_req_limit and reqlimit.check() then
|
||||
-- ngx.exit(ngx.HTTP_FORBIDDEN)
|
||||
-- end
|
||||
|
||||
-- our redis client
|
||||
local redis_client = nil
|
||||
if use_redis then
|
||||
|
|
|
@ -65,9 +65,9 @@ server {
|
|||
}
|
||||
|
||||
# requests limiting
|
||||
{% if USE_LIMIT_REQ == "yes" +%}
|
||||
include {{ NGINX_PREFIX }}limit-req.conf;
|
||||
{% endif %}
|
||||
#{% if USE_LIMIT_REQ == "yes" +%}
|
||||
# include {{ NGINX_PREFIX }}limit-req.conf;
|
||||
#{% endif %}
|
||||
|
||||
# connections limiting
|
||||
{% if USE_LIMIT_CONN == "yes" +%}
|
||||
|
|
|
@ -120,7 +120,7 @@ class Job(abc.ABC) :
|
|||
# if self._type == "file" :
|
||||
# mode = "ab"
|
||||
# file = open("/tmp/" + self._filename, mode)
|
||||
file = open("/tmp/" + self._filename, "ab")
|
||||
file = open("/tmp/" + self._filename, "wb")
|
||||
|
||||
elif self._redis != None :
|
||||
pipe = self._redis.pipeline()
|
||||
|
@ -153,8 +153,8 @@ class Job(abc.ABC) :
|
|||
|
||||
if self._redis == None :
|
||||
file.close()
|
||||
if count > 0 :
|
||||
shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename)
|
||||
#if count > 0 :
|
||||
shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename)
|
||||
os.remove("/tmp/" + self._filename)
|
||||
return JobRet.OK_RELOAD
|
||||
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
local M = {}
|
||||
local logger = require "logger"
|
||||
|
||||
-- Schedule a decrement of the shared request counter for `key` after
-- `delay` seconds, via an ngx one-shot timer.
-- key   : shared-dict key (client IP .. url)
-- delay : seconds before the counter is decremented
-- Returns true when the timer was created, false (after logging) otherwise.
function M.decr (key, delay)
	local function callback (premature, key)
		-- worker is shutting down : drop the counter entirely
		if premature then
			ngx.shared.limit_req:delete(key)
			return
		end
		local value, flags = ngx.shared.limit_req:get(key)
		if value ~= nil then
			-- delete the key when the counter reaches zero instead of
			-- storing a useless 0 entry
			if value - 1 == 0 then
				ngx.shared.limit_req:delete(key)
				return
			end
			ngx.shared.limit_req:set(key, value - 1, 0)
		end
	end
	local hdl, err = ngx.timer.at(delay, callback, key)
	-- bugfix : the original tested the undefined global `ok` (always nil),
	-- so this branch was always taken and the function always returned
	-- false even when the timer was successfully created
	if not hdl then
		logger.log(ngx.ERR, "REQ LIMIT", "can't setup decrement timer : " .. err)
		return false
	end
	return true
end
|
||||
|
||||
-- Atomically increment the shared request counter for `key`,
-- initializing it to 0 when absent.
-- Returns true on success, false (after logging) on failure.
function M.incr (key)
	local counter, err, forcible = ngx.shared.limit_req:incr(key, 1, 0, 0)
	if counter then
		return true
	end
	logger.log(ngx.ERR, "REQ LIMIT", "can't increment counter : " .. err)
	return false
end
|
||||
|
||||
-- Check whether the current client exceeded the request rate for `url`.
-- url  : URL pattern ("/" matches every request) matched against the
--        current request URI
-- rate : rate string such as "10/s" or "10r/s"
--        (units : s = second, m = minute, h = hour, d = day)
-- Returns true when the limit is reached, false otherwise.
function M.check (url, rate)
	if url == "/" or url == ngx.var.request_uri then
		local key = ngx.var.remote_addr .. url
		-- bugfix : gmatch returns an iterator function, not a table, so the
		-- original rate_split[1]/rate_split[2] indexing raised a runtime
		-- error ; collect the "/"-separated parts explicitly instead
		local parts = {}
		for part in rate:gmatch("([^/]+)") do
			parts[#parts + 1] = part
		end
		-- bugfix : keep only the numeric prefix and convert it so the
		-- `current > max` comparison below is number vs number
		-- (handles both "10/s" and nginx-style "10r/s")
		local max = tonumber(parts[1] and parts[1]:match("%d+"))
		local unit = parts[2]
		if max == nil or unit == nil then
			logger.log(ngx.ERR, "REQ LIMIT", "malformed rate : " .. rate)
			return false
		end
		local delay = 0
		if unit == "s" then
			delay = 1
		elseif unit == "m" then
			delay = 60
		elseif unit == "h" then
			delay = 3600
		elseif unit == "d" then
			delay = 86400
		end
		if M.incr(key) then
			local current, flags = ngx.shared.limit_req:get(key)
			if M.decr(key, delay) then
				if current > max then
					logger.log(ngx.WARN, "REQ LIMIT", "ip " .. ngx.var.remote_addr .. " has reached the limit : " .. current .. "/" .. unit .. " (max = " .. rate .. ")")
					return true
				end
			else
				-- decrement timer could not be created : undo the increment
				-- so the counter does not grow without bound
				ngx.shared.limit_req:set(key, current - 1, 0)
			end
		end
	end
	return false
end
|
||||
|
||||
return M
|
|
@ -55,8 +55,9 @@ function M.ping2()
|
|||
source = ltn12.source.string(request_body),
|
||||
sink = ltn12.sink.table(response_body)
|
||||
}
|
||||
if res and status == 200 and response_body["data"] == "pong" then
|
||||
return true
|
||||
if res and status:match("^.*% 200% .*$") then
|
||||
response_body = cjson.decode(response_body[1])
|
||||
return response_body["data"] == "pong"
|
||||
end
|
||||
return false
|
||||
end
|
||||
|
|
Loading…
Reference in New Issue