Squashed 'src/deps/src/lua-resty-lock/' content from commit 9dc550e56

git-subtree-dir: src/deps/src/lua-resty-lock
git-subtree-split: 9dc550e56b6f3b1a2f1a31bb270a91813b5b6861
Théophile Diot 2023-06-30 15:38:39 -04:00
commit 746a6e16d0
9 changed files with 1362 additions and 0 deletions

.gitattributes (new file, 1 line)
@@ -0,0 +1 @@
*.t linguist-language=Text

.gitignore (new file, 10 lines)
@@ -0,0 +1,10 @@
*.swp
*.swo
*~
go
t/servroot/
reindex
nginx
ctags
tags
a.lua

.travis.yml (new file, 67 lines)
@@ -0,0 +1,67 @@
sudo: required
dist: bionic
os: linux
language: c
compiler:
- gcc
cache:
directories:
- download-cache
env:
global:
- JOBS=3
- NGX_BUILD_JOBS=$JOBS
- LUAJIT_PREFIX=/opt/luajit21
- LUAJIT_LIB=$LUAJIT_PREFIX/lib
- LUAJIT_INC=$LUAJIT_PREFIX/include/luajit-2.1
- LUA_INCLUDE_DIR=$LUAJIT_INC
- LUA_CMODULE_DIR=/lib
- OPENSSL_PREFIX=/opt/ssl
- OPENSSL_LIB=$OPENSSL_PREFIX/lib
- OPENSSL_INC=$OPENSSL_PREFIX/include
- OPENSSL_VER=1.1.1k
- LD_LIBRARY_PATH=$LUAJIT_LIB:$LD_LIBRARY_PATH
- TEST_NGINX_SLEEP=0.006
matrix:
- NGINX_VERSION=1.19.9
install:
- if [ ! -d download-cache ]; then mkdir download-cache; fi
- if [ ! -f download-cache/openssl-$OPENSSL_VER.tar.gz ]; then wget -O download-cache/openssl-$OPENSSL_VER.tar.gz https://www.openssl.org/source/openssl-$OPENSSL_VER.tar.gz; fi
- sudo apt-get install -qq -y cpanminus axel
- sudo cpanm --notest Test::Nginx > build.log 2>&1 || (cat build.log && exit 1)
- git clone https://github.com/openresty/openresty.git ../openresty
- git clone https://github.com/openresty/lua-resty-core.git ../lua-resty-core
- git clone https://github.com/openresty/lua-resty-lrucache.git ../lua-resty-lrucache
- git clone https://github.com/openresty/nginx-devel-utils.git
- git clone https://github.com/simpl/ngx_devel_kit.git ../ndk-nginx-module
- git clone https://github.com/openresty/lua-nginx-module.git ../lua-nginx-module
- git clone https://github.com/openresty/no-pool-nginx.git ../no-pool-nginx
- git clone -b v2.1-agentzh https://github.com/openresty/luajit2.git
- git clone https://github.com/openresty/mockeagain.git
script:
- cd luajit2/
- make -j$JOBS CCDEBUG=-g Q= PREFIX=$LUAJIT_PREFIX CC=$CC XCFLAGS='-DLUA_USE_APICHECK -DLUA_USE_ASSERT' > build.log 2>&1 || (cat build.log && exit 1)
- sudo make install PREFIX=$LUAJIT_PREFIX > build.log 2>&1 || (cat build.log && exit 1)
- cd ..
- tar zxf download-cache/openssl-$OPENSSL_VER.tar.gz
- cd openssl-$OPENSSL_VER/
- ./config shared --prefix=$OPENSSL_PREFIX -DPURIFY > build.log 2>&1 || (cat build.log && exit 1)
- make -j$JOBS > build.log 2>&1 || (cat build.log && exit 1)
- sudo make PATH=$PATH install_sw > build.log 2>&1 || (cat build.log && exit 1)
- cd ../mockeagain/ && make CC=$CC -j$JOBS && cd ..
- export PATH=$PWD/work/nginx/sbin:$PWD/nginx-devel-utils:$PATH
- export LD_PRELOAD=$PWD/mockeagain/mockeagain.so
- export LD_LIBRARY_PATH=$PWD/mockeagain:$LD_LIBRARY_PATH
- export TEST_NGINX_RESOLVER=8.8.4.4
- export NGX_BUILD_CC=$CC
- ngx-build $NGINX_VERSION --with-ipv6 --with-http_realip_module --with-http_ssl_module --with-cc-opt="-I$OPENSSL_INC" --with-ld-opt="-L$OPENSSL_LIB -Wl,-rpath,$OPENSSL_LIB" --add-module=../ndk-nginx-module --add-module=../lua-nginx-module --with-debug > build.log 2>&1 || (cat build.log && exit 1)
- nginx -V
- ldd `which nginx`|grep -E 'luajit|ssl|pcre'
- prove -r t

Makefile (new file, 18 lines)
@@ -0,0 +1,18 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
.PHONY: all test install
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
test: all
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t

README.markdown (new file, 419 lines)
@@ -0,0 +1,419 @@
Name
====
lua-resty-lock - Simple shm-based nonblocking lock API
Table of Contents
=================
* [Name](#name)
* [Status](#status)
* [Synopsis](#synopsis)
* [Description](#description)
* [Methods](#methods)
* [new](#new)
* [lock](#lock)
* [unlock](#unlock)
* [expire](#expire)
* [For Multiple Lua Light Threads](#for-multiple-lua-light-threads)
* [For Cache Locks](#for-cache-locks)
* [Limitations](#limitations)
* [Prerequisites](#prerequisites)
* [Installation](#installation)
* [TODO](#todo)
* [Community](#community)
* [English Mailing List](#english-mailing-list)
* [Chinese Mailing List](#chinese-mailing-list)
* [Bugs and Patches](#bugs-and-patches)
* [Author](#author)
* [Copyright and License](#copyright-and-license)
* [See Also](#see-also)
Status
======
This library is still under early development but is already production ready.
Synopsis
========
```lua
# nginx.conf
http {
# you do not need the following line if you are using the
# OpenResty bundle:
lua_package_path "/path/to/lua-resty-core/lib/?.lua;/path/to/lua-resty-lock/lib/?.lua;;";
lua_shared_dict my_locks 100k;
server {
...
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
for i = 1, 2 do
local lock, err = resty_lock:new("my_locks")
if not lock then
ngx.say("failed to create lock: ", err)
end
local elapsed, err = lock:lock("my_key")
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
ngx.say("unlock: ", ok)
end
';
}
}
}
```
Description
===========
This library implements a simple mutex lock in a similar way to ngx_proxy module's [proxy_cache_lock directive](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock).
Under the hood, this library uses [ngx_lua](https://github.com/openresty/lua-nginx-module) module's shared memory dictionaries. The lock waiting is nonblocking because we use stepwise [ngx.sleep](https://github.com/openresty/lua-nginx-module#ngxsleep) to poll the lock periodically.
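The core idea can be sketched as follows (a simplified illustration only, not the actual implementation; see `lib/resty/lock.lua` for the real code):
```lua
-- simplified sketch of the nonblocking wait loop (illustration only)
local function try_lock(dict, key, opts)
    local step, elapsed = opts.step, 0
    while true do
        -- shdict:add() only succeeds if the key does not exist yet, which
        -- makes it work as an atomic "test and set" across all workers
        local ok, err = dict:add(key, true, opts.exptime)
        if ok then
            return elapsed
        end
        if err ~= "exists" then
            return nil, err
        end
        if elapsed >= opts.timeout then
            return nil, "timeout"
        end
        ngx.sleep(step)  -- yields the current light thread; never blocks the worker
        elapsed = elapsed + step
        step = math.min(step * opts.ratio, opts.max_step)
    end
end
```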
[Back to TOC](#table-of-contents)
Methods
=======
To load this library,
1. you need to specify this library's path in ngx_lua's [lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path) directive. For example, `lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";`.
2. you use `require` to load the library into a local Lua variable:
```lua
local lock = require "resty.lock"
```
[Back to TOC](#table-of-contents)
new
---
`syntax: obj, err = lock:new(dict_name)`
`syntax: obj, err = lock:new(dict_name, opts)`
Creates a new lock object instance by specifying the shared dictionary name (created by [lua_shared_dict](https://github.com/openresty/lua-nginx-module#lua_shared_dict)) and an optional options table `opts`.
In case of failure, returns `nil` and a string describing the error.
The options table accepts the following options:
* `exptime`
Specifies the expiration time (in seconds) for the lock entry in the shared memory dictionary. Time values are accurate to `0.001` seconds (one millisecond). Defaults to `30` (seconds). Even if the invoker does not call `unlock` or the object holding the lock is not GC'd, the lock will be released after this time, so deadlocks cannot happen even when the worker process holding the lock crashes.
* `timeout`
Specifies the maximal waiting time (in seconds) for [lock](#lock) method calls on the current object instance. Time values are accurate to `0.001` seconds (one millisecond). Defaults to `5` (seconds). This option value cannot be bigger than `exptime`. This timeout prevents a [lock](#lock) method call from waiting forever.
You can specify `0` to make the [lock](#lock) method return immediately without waiting if it cannot acquire the lock right away.
* `step`
Specifies the initial step (in seconds) of sleeping when waiting for the lock. Defaults to `0.001` (seconds). When the [lock](#lock) method is waiting on a busy lock, it sleeps by steps. The step size is increased by a ratio (specified by the `ratio` option) until reaching the step size limit (specified by the `max_step` option).
* `ratio`
Specifies the step increasing ratio. Defaults to `2`, that is, the step size doubles at each waiting iteration.
* `max_step`
Specifies the maximal step size (i.e., sleep interval, in seconds) allowed (see also the `step` and `ratio` options). Defaults to `0.5` (seconds).
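As an example, a lock tuned for a short critical section might be created like this (a minimal sketch; the option values are purely illustrative):
```lua
local resty_lock = require "resty.lock"

-- all options are optional; the values below are illustrative only
local lock, err = resty_lock:new("my_locks", {
    exptime  = 10,    -- auto-release the lock after 10s even if unlock() is never called
    timeout  = 2,     -- lock() gives up with the "timeout" error after waiting 2s
    step     = 0.01,  -- initial sleep interval while waiting for a busy lock
    ratio    = 2,     -- the sleep interval doubles at each waiting iteration
    max_step = 0.1,   -- the sleep interval never grows beyond 0.1s
})
if not lock then
    ngx.log(ngx.ERR, "failed to create lock: ", err)
    return
end
```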
[Back to TOC](#table-of-contents)
lock
----
`syntax: elapsed, err = obj:lock(key)`
Tries to lock a key across all the Nginx worker processes in the current Nginx server instance. Different keys are different locks.
The length of the key string must not be larger than 65535 bytes.
Returns the waiting time (in seconds) if the lock is successfully acquired. Otherwise returns `nil` and a string describing the error.
The waiting time is not taken from the wallclock, but rather is computed by simply adding up all the waiting "steps". A nonzero `elapsed` return value indicates that someone else has just held this lock. But a zero return value cannot guarantee that no one else has just acquired and released the lock.
When this method is waiting to acquire the lock, no operating system threads are blocked and the current Lua "light thread" is automatically yielded behind the scenes.
It is strongly recommended to always call the [unlock()](#unlock) method to actively release the lock as soon as possible.
If the [unlock()](#unlock) method is never called after this method call, the lock will get released when
1. the current `resty.lock` object instance is collected automatically by the Lua GC.
2. the `exptime` for the lock entry is reached.
Common errors for this method call are
* "timeout"
: The timeout threshold specified by the `timeout` option of the [new](#new) method is exceeded.
* "locked"
: The current `resty.lock` object instance is already holding a lock (not necessarily of the same key).
Other possible errors are from ngx_lua's shared dictionary API.
It is required to create different `resty.lock` instances for multiple simultaneous locks (i.e., those around different keys).
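A typical call site that distinguishes the common error cases might look like this (a minimal sketch; the `fail` helper stands in for whatever error handling your application uses):
```lua
local elapsed, err = lock:lock("my_key")
if not elapsed then
    if err == "timeout" then
        -- someone else held the lock for longer than our `timeout` option allows
        return fail("gave up waiting for the lock")
    end
    -- "locked", or an error from the shared dictionary API
    return fail("failed to acquire the lock: ", err)
end
-- elapsed == 0 means the lock was free; a nonzero value is the total time slept
```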
[Back to TOC](#table-of-contents)
unlock
------
`syntax: ok, err = obj:unlock()`
Releases the lock held by the current `resty.lock` object instance.
Returns `1` on success. Returns `nil` and a string describing the error otherwise.
If you call `unlock` when no lock is currently held, the error "unlocked" will be returned.
[Back to TOC](#table-of-contents)
expire
------
`syntax: ok, err = obj:expire(timeout)`
Sets the TTL of the lock held by the current `resty.lock` object instance. This resets the
remaining lifetime of the lock to `timeout` seconds if it is given; otherwise the `exptime` provided while
calling [new](#new) is used.
Note that the `timeout` supplied to this function is independent from the options provided while
calling [new](#new). Calling `expire()` will not change the values specified in [new](#new),
and a subsequent `expire(nil)` call will still fall back to the `exptime` from [new](#new).
Returns `true` on success. Returns `nil` and a string describing the error otherwise.
If you call `expire` when no lock is currently held, the error "unlocked" will be returned.
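For instance, a handler holding the lock across a slow backend call might refresh the TTL right before starting it (a minimal sketch; the value `10` is purely illustrative and `fail` again stands in for your own error handling):
```lua
-- refresh the held lock so it survives a slow backend call
local ok, err = lock:expire(10)   -- the lock now expires 10 seconds from now
if not ok then
    return fail("failed to refresh the lock TTL: ", err)
end
-- ... perform the slow operation while still holding the lock ...
-- a later expire(nil) would fall back to the exptime given to new()
```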
[Back to TOC](#table-of-contents)
For Multiple Lua Light Threads
==============================
It is always a bad idea to share a single `resty.lock` object instance across multiple ngx_lua "light threads" because the object itself is stateful and is vulnerable to race conditions. It is highly recommended to always allocate a separate `resty.lock` object instance for each "light thread" that needs one.
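In other words, create the lock object inside each light thread's function instead of capturing a shared one (a minimal sketch; the dictionary and key names are illustrative):
```lua
local resty_lock = require "resty.lock"

for i = 1, 3 do
    ngx.thread.spawn(function ()
        -- each light thread gets its own resty.lock object instance
        local lock, err = resty_lock:new("my_locks")
        if not lock then
            ngx.log(ngx.ERR, "failed to create lock: ", err)
            return
        end
        local elapsed, err = lock:lock("my_key")
        if not elapsed then
            ngx.log(ngx.ERR, "thread ", i, " failed to acquire lock: ", err)
            return
        end
        -- ... critical section ...
        lock:unlock()
    end)
end
```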
[Back to TOC](#table-of-contents)
For Cache Locks
===============
One common use case for this library is to avoid the so-called "dog-pile effect", that is, to limit concurrent backend queries for the same key when a cache miss happens. This usage is similar to the standard ngx_proxy module's [proxy_cache_lock](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock) directive.
The basic workflow for a cache lock is as follows:
1. Check the cache for a hit with the key. If a cache miss happens, proceed to step 2.
2. Instantiate a `resty.lock` object, call the [lock](#lock) method on the key, and check the 1st return value, i.e., the lock waiting time. If it is `nil`, handle the error; otherwise proceed to step 3.
3. Check the cache again for a hit. If it is still a miss, proceed to step 4; otherwise release the lock by calling [unlock](#unlock) and then return the cached value.
4. Query the backend (the data source) for the value, put the result into the cache, and then release the lock currently held by calling [unlock](#unlock).
Below is a fairly complete code example that demonstrates the idea.
```lua
local resty_lock = require "resty.lock"
local cache = ngx.shared.my_cache
-- step 1:
local val, err = cache:get(key)
if val then
ngx.say("result: ", val)
return
end
if err then
return fail("failed to get key from shm: ", err)
end
-- cache miss!
-- step 2:
local lock, err = resty_lock:new("my_locks")
if not lock then
return fail("failed to create lock: ", err)
end
local elapsed, err = lock:lock(key)
if not elapsed then
return fail("failed to acquire the lock: ", err)
end
-- lock successfully acquired!
-- step 3:
-- someone might have already put the value into the cache
-- so we check it here again:
val, err = cache:get(key)
if val then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
ngx.say("result: ", val)
return
end
-- step 4:
local val = fetch_redis(key)
if not val then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
-- FIXME: we should handle the backend miss more carefully
-- here, like inserting a stub value into the cache.
ngx.say("no value found")
return
end
-- update the shm cache with the newly fetched value
local ok, err = cache:set(key, val, 1)
if not ok then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
return fail("failed to update shm cache: ", err)
end
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
ngx.say("result: ", val)
```
Here we assume that we use the ngx_lua shared memory dictionary to cache the Redis query results and we have the following configurations in `nginx.conf`:
```nginx
# you may want to change the dictionary size for your cases.
lua_shared_dict my_cache 10m;
lua_shared_dict my_locks 1m;
```
The `my_cache` dictionary is for the data cache while the `my_locks` dictionary is for `resty.lock` itself.
Several important things to note in the example above:
1. You need to release the lock as soon as possible, even when some other unrelated errors happen.
2. You need to update the cache with the result obtained from the backend *before* releasing the lock so that other threads already waiting on the lock can get the cached value when they acquire the lock afterwards.
3. When the backend returns no value at all, we should handle the case carefully by inserting some stub value into the cache.
[Back to TOC](#table-of-contents)
Limitations
===========
Some of this library's API functions may yield. So do not call those functions in `ngx_lua` module contexts where yielding is not supported (yet), like `init_by_lua*`,
`init_worker_by_lua*`, `header_filter_by_lua*`, `body_filter_by_lua*`, `balancer_by_lua*`, and `log_by_lua*`.
[Back to TOC](#table-of-contents)
Prerequisites
=============
* [LuaJIT](http://luajit.org) 2.0+
* [ngx_lua](https://github.com/openresty/lua-nginx-module) 0.8.10+
[Back to TOC](#table-of-contents)
Installation
============
It is recommended to use the latest [OpenResty bundle](http://openresty.org) directly where this library
is bundled and enabled by default. At least OpenResty 1.4.2.9 is required. And you need to enable LuaJIT when building your OpenResty
bundle by passing the `--with-luajit` option to its `./configure` script. No extra Nginx configuration is required.
If you want to use this library with your own Nginx build (with ngx_lua), then you need to
ensure you are using at least ngx_lua 0.8.10. Also, you need to configure
the [lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path) directive to
add the path of your lua-resty-lock and lua-resty-core source directories to ngx_lua's Lua module search path, as in
```nginx
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lock/lib/?.lua;/path/to/lua-resty-core/lib/?.lua;;";
...
}
```
and then load the library in Lua:
```lua
local resty_lock = require "resty.lock"
```
Note that this library depends on the [lua-resty-core](https://github.com/openresty/lua-resty-core) library
which is also enabled by default in the OpenResty bundle.
[Back to TOC](#table-of-contents)
TODO
====
* We should simplify the current implementation when LuaJIT 2.1 gets support for `__gc` metamethod on normal Lua tables. Right now we are using an FFI cdata and a ref/unref memo table to work around this, which is rather ugly and a bit inefficient.
[Back to TOC](#table-of-contents)
Community
=========
[Back to TOC](#table-of-contents)
English Mailing List
--------------------
The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers.
[Back to TOC](#table-of-contents)
Chinese Mailing List
--------------------
The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers.
[Back to TOC](#table-of-contents)
Bugs and Patches
================
Please report bugs or submit patches by
1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-lock/issues),
1. or posting to the [OpenResty community](#community).
[Back to TOC](#table-of-contents)
Author
======
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, OpenResty Inc.
[Back to TOC](#table-of-contents)
Copyright and License
=====================
This module is licensed under the BSD license.
Copyright (C) 2013-2019, by Yichun "agentzh" Zhang, OpenResty Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Back to TOC](#table-of-contents)
See Also
========
* the ngx_lua module: https://github.com/openresty/lua-nginx-module
* OpenResty: http://openresty.org
[Back to TOC](#table-of-contents)

dist.ini (new file, 10 lines)
@@ -0,0 +1,10 @@
name=lua-resty-lock
abstract=Simple shm-based nonblocking lock API
author=Yichun Zhang (agentzh)
is_original=yes
license=2bsd
lib_dir=lib
doc_dir=lib
repo_link=https://github.com/openresty/lua-resty-lock
main_module=lib/resty/lock.lua
requires = luajit

lib/resty/lock.lua (new file, 222 lines)
@@ -0,0 +1,222 @@
-- Copyright (C) Yichun Zhang (agentzh)
require "resty.core.shdict" -- enforce this to avoid dead locks
local ffi = require "ffi"
local ffi_new = ffi.new
local shared = ngx.shared
local sleep = ngx.sleep
local log = ngx.log
local max = math.max
local min = math.min
local debug = ngx.config.debug
local setmetatable = setmetatable
local tonumber = tonumber
local _M = { _VERSION = '0.08' }
local mt = { __index = _M }
local ERR = ngx.ERR
local FREE_LIST_REF = 0
-- FIXME: we don't need this when we have __gc metamethod support on Lua
-- tables.
local memo = {}
if debug then _M.memo = memo end
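-- The "memo" table pins the shared dict object and the lock key with small
-- integer refs so that the cdata __gc handler below (which can only carry
-- numbers) can still reach them at collection time. memo[FREE_LIST_REF]
-- holds the head of a free list of recycled ref slots.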
local function ref_obj(key)
if key == nil then
return -1
end
local ref = memo[FREE_LIST_REF]
if ref and ref ~= 0 then
memo[FREE_LIST_REF] = memo[ref]
else
ref = #memo + 1
end
memo[ref] = key
-- print("ref key_id returned ", ref)
return ref
end
if debug then _M.ref_obj = ref_obj end
local function unref_obj(ref)
if ref >= 0 then
memo[ref] = memo[FREE_LIST_REF]
memo[FREE_LIST_REF] = ref
end
end
if debug then _M.unref_obj = unref_obj end
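-- __gc handler for the lock cdata: if the lock is still held when the object
-- is collected, delete its shm key so the lock gets released, then return the
-- key and dict refs to the free list.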
local function gc_lock(cdata)
local dict_id = tonumber(cdata.dict_id)
local key_id = tonumber(cdata.key_id)
-- print("key_id: ", key_id, ", key: ", memo[key_id], "dict: ",
-- type(memo[cdata.dict_id]))
if key_id > 0 then
local key = memo[key_id]
unref_obj(key_id)
local dict = memo[dict_id]
-- print("dict.delete type: ", type(dict.delete))
local ok, err = dict:delete(key)
if not ok then
log(ERR, 'failed to delete key "', key, '": ', err)
end
cdata.key_id = 0
end
unref_obj(dict_id)
end
local ctype = ffi.metatype("struct { int key_id; int dict_id; }",
{ __gc = gc_lock })
function _M.new(_, dict_name, opts)
local dict = shared[dict_name]
if not dict then
return nil, "dictionary not found"
end
local cdata = ffi_new(ctype)
cdata.key_id = 0
cdata.dict_id = ref_obj(dict)
local timeout, exptime, step, ratio, max_step
if opts then
timeout = opts.timeout
exptime = opts.exptime
step = opts.step
ratio = opts.ratio
max_step = opts.max_step
end
if not exptime then
exptime = 30
end
if timeout then
timeout = min(timeout, exptime)
if step then
step = min(step, timeout)
end
end
local self = {
cdata = cdata,
dict = dict,
timeout = timeout or 5,
exptime = exptime,
step = step or 0.001,
ratio = ratio or 2,
max_step = max_step or 0.5,
}
setmetatable(self, mt)
return self
end
function _M.lock(self, key)
if not key then
return nil, "nil key"
end
local dict = self.dict
local cdata = self.cdata
if cdata.key_id > 0 then
return nil, "locked"
end
local exptime = self.exptime
local ok, err = dict:add(key, true, exptime)
if ok then
cdata.key_id = ref_obj(key)
self.key = key
return 0
end
if err ~= "exists" then
return nil, err
end
-- lock held by others
local step = self.step
local ratio = self.ratio
local timeout = self.timeout
local max_step = self.max_step
local elapsed = 0
while timeout > 0 do
sleep(step)
elapsed = elapsed + step
timeout = timeout - step
local ok, err = dict:add(key, true, exptime)
if ok then
cdata.key_id = ref_obj(key)
self.key = key
return elapsed
end
if err ~= "exists" then
return nil, err
end
if timeout <= 0 then
break
end
step = min(max(0.001, step * ratio), timeout, max_step)
end
return nil, "timeout"
end
function _M.unlock(self)
local dict = self.dict
local cdata = self.cdata
local key_id = tonumber(cdata.key_id)
if key_id <= 0 then
return nil, "unlocked"
end
local key = memo[key_id]
unref_obj(key_id)
local ok, err = dict:delete(key)
if not ok then
return nil, err
end
cdata.key_id = 0
return 1
end
function _M.expire(self, time)
local dict = self.dict
local cdata = self.cdata
local key_id = tonumber(cdata.key_id)
if key_id <= 0 then
return nil, "unlocked"
end
if not time then
time = self.exptime
end
local ok, err = dict:replace(self.key, true, time)
if not ok then
return nil, err
end
return true
end
return _M

t/sanity.t (new file, 546 lines)
@@ -0,0 +1,546 @@
# vim:set ft= ts=4 sw=4 et:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "../lua-resty-core/lib/?.lua;lib/?.lua;;";
lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
lua_shared_dict cache_locks 100k;
};
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
$ENV{TEST_NGINX_REDIS_PORT} ||= 6379;
no_long_string();
#no_diff();
run_tests();
__DATA__
=== TEST 1: lock is subject to garbage collection
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
for i = 1, 2 do
collectgarbage("collect")
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock("foo")
ngx.say("lock: ", elapsed, ", ", err)
end
collectgarbage("collect")
}
}
--- request
GET /t
--- response_body
lock: 0, nil
lock: 0, nil
--- no_error_log
[error]
=== TEST 2: serial lock and unlock
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
for i = 1, 2 do
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock("foo")
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
ngx.say("unlock: ", ok)
end
}
}
--- request
GET /t
--- response_body
lock: 0, nil
unlock: 1
lock: 0, nil
unlock: 1
--- no_error_log
[error]
=== TEST 3: timed out locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
for i = 1, 2 do
local lock1 = lock:new("cache_locks", { timeout = 0.01 })
local lock2 = lock:new("cache_locks", { timeout = 0.01 })
local elapsed, err = lock1:lock("foo")
ngx.say("lock 1: lock: ", elapsed, ", ", err)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local ok, err = lock1:unlock()
ngx.say("lock 1: unlock: ", ok, ", ", err)
local ok, err = lock2:unlock()
ngx.say("lock 2: unlock: ", ok, ", ", err)
end
}
}
--- request
GET /t
--- response_body
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
--- no_error_log
[error]
=== TEST 4: waited locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
}
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.12[6-9] nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 5: waited locks (custom step)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { step = 0.01 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
}
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.1[4-5]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 6: waited locks (custom ratio)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { ratio = 3 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
}
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.1[2]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 7: waited locks (custom max step)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { max_step = 0.05 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
}
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.11[2-4]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 8: lock expired by itself
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks", { exptime = 0.1 })
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
-- ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { max_step = 0.05 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
}
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
main thread: lock: 0.11[2-4]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 9: ref & unref (1 at most)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
local memo = lock.memo
local ref = lock.ref_obj("foo")
ngx.say(#memo)
lock.unref_obj(ref)
ngx.say(#memo)
ref = lock.ref_obj("bar")
ngx.say(#memo)
lock.unref_obj(ref)
ngx.say(#memo)
}
}
--- request
GET /t
--- response_body
1
0
1
0
--- no_error_log
[error]
--- skip_eval: 3: system("$NginxBinary -V 2>&1 | grep -- '--with-debug'") ne 0
=== TEST 10: ref & unref (2 at most)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
local memo = lock.memo
for i = 1, 2 do
local refs = {}
refs[1] = lock.ref_obj("foo")
ngx.say(#memo)
refs[2] = lock.ref_obj("bar")
ngx.say(#memo)
lock.unref_obj(refs[1])
ngx.say(#memo)
lock.unref_obj(refs[2])
ngx.say(#memo)
end
}
}
--- request
GET /t
--- response_body
1
2
2
2
2
2
1
1
--- no_error_log
[error]
--- skip_eval: 3: system("$NginxBinary -V 2>&1 | grep -- '--with-debug'") ne 0
=== TEST 11: lock on a nil key
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock(nil)
if elapsed then
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
else
ngx.say("failed to lock: ", err)
end
}
}
--- request
GET /t
--- response_body
failed to lock: nil key
--- no_error_log
[error]
=== TEST 12: same shdict, multiple locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
local memo = lock.memo
local lock1 = lock:new("cache_locks", { timeout = 0.01 })
for i = 1, 3 do
lock1:lock("lock_key")
lock1:unlock()
collectgarbage("collect")
end
local lock2 = lock:new("cache_locks", { timeout = 0.01 })
local lock3 = lock:new("cache_locks", { timeout = 0.01 })
lock2:lock("lock_key")
lock3:lock("lock_key")
collectgarbage("collect")
ngx.say(#memo)
lock2:unlock()
lock3:unlock()
collectgarbage("collect")
}
}
--- request
GET /t
--- response_body
4
--- no_error_log
[error]
--- skip_eval: 3: system("$NginxBinary -V 2>&1 | grep -- '--with-debug'") ne 0
=== TEST 13: timed out locks (0 timeout)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
for i = 1, 2 do
local lock1 = lock:new("cache_locks", { timeout = 0 })
local lock2 = lock:new("cache_locks", { timeout = 0 })
local elapsed, err = lock1:lock("foo")
ngx.say("lock 1: lock: ", elapsed, ", ", err)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local ok, err = lock1:unlock()
ngx.say("lock 1: unlock: ", ok, ", ", err)
local ok, err = lock2:unlock()
ngx.say("lock 2: unlock: ", ok, ", ", err)
end
}
}
--- request
GET /t
--- response_body
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
--- no_error_log
[error]
=== TEST 14: expire()
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua_block {
local lock = require "resty.lock"
for i = 1, 2 do
local lock1 = lock:new("cache_locks", { timeout = 0, exptime = 0.1 })
local lock2 = lock:new("cache_locks", { timeout = 0, exptime = 0.1 })
local exp, err = lock1:expire()
ngx.say("lock 1: expire: ", exp, ", ", err)
local elapsed, err = lock1:lock("foo")
ngx.say("lock 1: lock: ", elapsed, ", ", err)
ngx.sleep(0.06)
local exp, err = lock1:expire()
ngx.say("lock 1: expire: ", exp, ", ", err)
ngx.sleep(0.06)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local exp, err = lock1:expire(0.2)
ngx.say("lock 1: expire: ", exp, ", ", err)
ngx.sleep(0.15)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
ngx.sleep(0.1)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local ok, err = lock2:unlock()
ngx.say("lock 2: unlock: ", ok, ", ", err)
local exp, err = lock2:expire(0.2)
ngx.say("lock 2: expire: ", exp, ", ", err)
end
}
}
--- request
GET /t
--- response_body
lock 1: expire: nil, unlocked
lock 1: lock: 0, nil
lock 1: expire: true, nil
lock 2: lock: nil, timeout
lock 1: expire: true, nil
lock 2: lock: nil, timeout
lock 2: lock: 0, nil
lock 2: unlock: 1, nil
lock 2: expire: nil, unlocked
lock 1: expire: nil, unlocked
lock 1: lock: 0, nil
lock 1: expire: true, nil
lock 2: lock: nil, timeout
lock 1: expire: true, nil
lock 2: lock: nil, timeout
lock 2: lock: 0, nil
lock 2: unlock: 1, nil
lock 2: expire: nil, unlocked
--- no_error_log
[error]

valgrind.suppress (new file, 69 lines)
@@ -0,0 +1,69 @@
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_add_event
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:index
fun:expand_dynamic_string_token
fun:_dl_map_object
fun:map_doit
fun:_dl_catch_error
fun:do_preload
fun:dl_main
fun:_dl_sysdep_start
fun:_dl_start
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_init
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_notify_init
fun:ngx_epoll_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_test_rdhup
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:ngx_alloc
fun:ngx_set_environment
fun:ngx_single_process_cycle
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:ngx_alloc
fun:ngx_set_environment
fun:ngx_worker_process_init
fun:ngx_worker_process_cycle
}