Squashed 'src/deps/src/lua-resty-lrucache/' content from commit a79615ec9

git-subtree-dir: src/deps/src/lua-resty-lrucache
git-subtree-split: a79615ec9dc547fdb4aaee59ef8f5a50648ce9fd
commit c82b0bdd27
Author: Théophile Diot
Date:   2023-06-30 15:38:41 -04:00
27 changed files with 3620 additions and 0 deletions

.gitattributes
@@ -0,0 +1 @@
*.t linguist-language=Text

.gitignore
@@ -0,0 +1,10 @@
*.swp
*.swo
*~
go
t/servroot/
reindex
nginx
ctags
tags
a.lua

.travis.yml
@@ -0,0 +1,45 @@
# vim:st=2 sts=2 sw=2 et:
os: linux
dist: bionic
sudo: false
language: c
compiler: gcc
env:
global:
- JOBS=3
- NGX_BUILD_JOBS=$JOBS
- LUAJIT_PREFIX=$TRAVIS_BUILD_DIR/luajit
- LUAJIT_LIB=$LUAJIT_PREFIX/lib
- LUAJIT_INC=$LUAJIT_PREFIX/include/luajit-2.1
- LD_LIBRARY_PATH=$LUAJIT_LIB:$LD_LIBRARY_PATH
- TEST_NGINX_SLEEP=0.006
- TEST_NGINX_RANDOMIZE=1
matrix:
- NGINX_VERSION=1.19.9
install:
- export NGX_BUILD_CC=$CC
- export PATH=$PWD/work/nginx/sbin:$PWD/nginx-devel-utils:$PATH
- sudo apt-get install -qq -y cpanminus axel
- sudo cpanm --notest Test::Nginx > build.log 2>&1 || (cat build.log && exit 1)
- git clone https://github.com/openresty/openresty.git ../openresty
- git clone https://github.com/openresty/nginx-devel-utils.git
- git clone https://github.com/simpl/ngx_devel_kit.git ../ndk-nginx-module
- git clone https://github.com/openresty/lua-nginx-module.git ../lua-nginx-module
- git clone https://github.com/openresty/lua-resty-core.git ../lua-resty-core
- git clone https://github.com/openresty/no-pool-nginx.git ../no-pool-nginx
- git clone -b v2.1-agentzh https://github.com/openresty/luajit2.git
- pushd luajit2/
- make -j$JOBS CCDEBUG=-g Q= PREFIX=$LUAJIT_PREFIX CC=$CC XCFLAGS='-DLUA_USE_APICHECK -DLUA_USE_ASSERT' > build.log 2>&1 || (cat build.log && exit 1)
- make install PREFIX=$LUAJIT_PREFIX > build.log 2>&1 || (cat build.log && exit 1)
- popd
- ngx-build $NGINX_VERSION --add-module=../ndk-nginx-module --add-module=../lua-nginx-module --with-debug > build.log 2>&1 || (cat build.log && exit 1)
- nginx -V
- ldd `which nginx`|grep -E 'luajit|ssl|pcre'
script:
- make lint
- prove -j$JOBS -I. -r t/

Makefile
@@ -0,0 +1,23 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
.PHONY: all test install lint
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/lrucache
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
$(INSTALL) lib/resty/lrucache/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/lrucache/
test: all lint
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t
lint:
@! grep -P -n --color -- 'require.*?resty\.lrucache[^.]' t/*pureffi*/*.t || (echo "ERROR: Found pureffi tests requiring 'resty.lrucache'." > /dev/stderr; exit 1)
@! grep -R -P -n --color --exclude-dir=pureffi --exclude=*mixed.t -- 'require.*?resty\.lrucache\.pureffi' t/*.t || (echo "ERROR: Found pure Lua tests requiring 'resty.lrucache.pureffi'." > /dev/stderr; exit 1)

README.markdown
@@ -0,0 +1,388 @@
Name
====
lua-resty-lrucache - Lua-land LRU cache based on the LuaJIT FFI.
Table of Contents
=================
* [Name](#name)
* [Status](#status)
* [Synopsis](#synopsis)
* [Description](#description)
* [Methods](#methods)
* [new](#new)
* [set](#set)
* [get](#get)
* [delete](#delete)
* [count](#count)
* [capacity](#capacity)
* [get_keys](#get_keys)
* [flush_all](#flush_all)
* [Prerequisites](#prerequisites)
* [Installation](#installation)
* [Community](#community)
* [English Mailing List](#english-mailing-list)
* [Chinese Mailing List](#chinese-mailing-list)
* [Bugs and Patches](#bugs-and-patches)
* [Author](#author)
* [Copyright and License](#copyright-and-license)
* [See Also](#see-also)
Status
======
This library is considered production ready.
Synopsis
========
```lua
-- file myapp.lua: example "myapp" module
local _M = {}
-- alternatively: local lrucache = require "resty.lrucache.pureffi"
local lrucache = require "resty.lrucache"
-- we need to initialize the cache on the lua module level so that
-- it can be shared by all the requests served by each nginx worker process:
local c, err = lrucache.new(200) -- allow up to 200 items in the cache
if not c then
error("failed to create the cache: " .. (err or "unknown"))
end
function _M.go()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("dog", { age = 10 }, 0.1) -- expire in 0.1 sec
c:delete("dog")
c:flush_all() -- flush all the cached data
end
return _M
```
```nginx
# nginx.conf
http {
# only if not using an official OpenResty release
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
server {
listen 8080;
location = /t {
content_by_lua_block {
require("myapp").go()
}
}
}
}
```
Description
===========
This library implements a simple LRU cache for
[OpenResty](https://openresty.org) and the
[ngx_lua](https://github.com/openresty/lua-nginx-module) module.
This cache also supports expiration time.
The LRU cache resides completely in the Lua VM and is subject to Lua GC. As
such, do not expect it to get shared across the OS process boundary. The upside
is that you can cache arbitrary complex Lua values (such as deep nested Lua
tables) without the overhead of serialization (as with `ngx_lua`'s [shared
dictionary
API](https://github.com/openresty/lua-nginx-module#lua_shared_dict)).
The downside is that your cache is always limited to the current OS process
(i.e. the current Nginx worker process). It does not really make much sense to
use this library in the context of
[init_by_lua](https://github.com/openresty/lua-nginx-module#init_by_lua)
because the cache will not get shared by any of the worker processes (unless
you just want to "warm up" the cache with predefined items which will get
inherited by the workers via `fork()`).
This library offers two different implementations in the form of two classes:
`resty.lrucache` and `resty.lrucache.pureffi`. Both implement the same API.
The only difference is that the latter is a pure FFI implementation that also
implements an FFI-based hash table for the cache lookup, while the former uses
native Lua tables.
If the cache hit rate is relatively high, you should use the `resty.lrucache`
class which is faster than `resty.lrucache.pureffi`.
However, if the cache hit rate is relatively low and there can be a *lot* of
variations of keys inserted into and removed from the cache, then you should
use the `resty.lrucache.pureffi` class instead, because Lua tables are not good at
removing keys frequently. You would likely see the `resizetab` function call in
the LuaJIT runtime being very hot in [on-CPU flame
graphs](https://github.com/openresty/stapxx#lj-lua-stacks) if you use the
`resty.lrucache` class instead of `resty.lrucache.pureffi` in such a use case.
[Back to TOC](#table-of-contents)
Methods
=======
To load this library,
1. use an official [OpenResty release](https://openresty.org) or follow the
[Installation](#installation) instructions.
2. use `require` to load the library into a local Lua variable:
```lua
local lrucache = require "resty.lrucache"
```
or
```lua
local lrucache = require "resty.lrucache.pureffi"
```
[Back to TOC](#table-of-contents)
new
---
`syntax: cache, err = lrucache.new(max_items [, load_factor])`
Creates a new cache instance. Upon failure, returns `nil` and a string
describing the error.
The `max_items` argument specifies the maximal number of items this cache can
hold.
The `load_factor` argument designates the "load factor" of the FFI-based
hash table used internally by `resty.lrucache.pureffi`; the default value is
0.5 (i.e. 50%). If specified, the load factor is clamped to the range
`[0.1, 1]` (i.e. a value greater than 1 is saturated to 1, and a value
smaller than 0.1 is raised to 0.1). This argument is only meaningful for
`resty.lrucache.pureffi`.
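For example, an illustrative sketch of creating both cache classes (the item
count and load factor values here are arbitrary):

```lua
local lrucache = require "resty.lrucache"
local lrucache_pureffi = require "resty.lrucache.pureffi"

-- plain class: Lua-table based hash lookup, at most 1000 items
local c, err = lrucache.new(1000)
if not c then
    error("failed to create the cache: " .. (err or "unknown"))
end

-- pure FFI class: the second argument is the hash table load factor,
-- clamped to the range [0.1, 1]; only meaningful for this class
local c2, err2 = lrucache_pureffi.new(1000, 0.5)
if not c2 then
    error("failed to create the cache: " .. (err2 or "unknown"))
end
```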
[Back to TOC](#table-of-contents)
set
---
`syntax: cache:set(key, value, ttl?, flags?)`
Sets a key with a value and an expiration time.
When the cache is full, the cache will automatically evict the least recently
used item.
The optional `ttl` argument specifies the expiration time. The time value is
in seconds and may be fractional (e.g. `0.25`). A nil `ttl` argument means
the value never expires (which is the default).
The optional `flags` argument specifies a user flags value associated with the
item to be stored. It can be retrieved later with the item. The user flags are
stored as an unsigned 32-bit integer internally, and thus must be specified as
a Lua number. If not specified, flags will have a default value of `0`. This
argument was added in the `v0.10` release.
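For example, a few illustrative `set` calls on a cache instance `c` created as
in the [Synopsis](#synopsis) (the key names and values are arbitrary):

```lua
-- cache a value without expiration (the default)
c:set("key1", "value1")

-- cache a nested Lua table for 0.25 seconds
c:set("key2", { foo = "bar" }, 0.25)

-- cache a value forever, attaching the user flags value 0x2
c:set("key3", 42, nil, 0x2)
```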
[Back to TOC](#table-of-contents)
get
---
`syntax: data, stale_data, flags = cache:get(key)`
Fetches a value with the key. If the key does not exist in the cache or has
already expired, `nil` will be returned.
Starting from `v0.03`, the stale data is also returned as the second return
value if available.
Starting from `v0.10`, the user flags value associated with the stored item is
also returned as the third return value. If no user flags were given to an
item, its default flags will be `0`.
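For example, an illustrative lookup on a cache instance `c`, distinguishing
fresh hits, stale hits, and misses (the key name is arbitrary):

```lua
local data, stale_data, flags = c:get("key2")
if data ~= nil then
    ngx.say("hit: ", data, " (flags: ", flags, ")")
elseif stale_data ~= nil then
    -- the key exists but its ttl has elapsed; the stale value is still
    -- available as the second return value
    ngx.say("stale: ", stale_data)
else
    ngx.say("miss")
end
```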
[Back to TOC](#table-of-contents)
delete
------
`syntax: cache:delete(key)`
Removes an item specified by the key from the cache.
[Back to TOC](#table-of-contents)
count
-----
`syntax: count = cache:count()`
Returns the number of items currently stored in the cache **including**
expired items if any.
The returned `count` value will always be greater than or equal to 0 and
smaller than or equal to the `max_items` argument given to
[`lrucache.new`](#new).
This method was added in the `v0.10` release.
[Back to TOC](#table-of-contents)
capacity
--------
`syntax: size = cache:capacity()`
Returns the maximum number of items the cache can hold. The return value is
the same as the `max_items` argument given to [`lrucache.new`](#new) when the
cache was created.
This method was added in the `v0.10` release.
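For example, an illustrative sketch of `count` versus `capacity` (the key
names are arbitrary):

```lua
local lrucache = require "resty.lrucache"

local c = lrucache.new(2)
c:set("dog", 32)
c:set("cat", 56)
c:set("pig", 96)        -- evicts the least recently used key ("dog")

ngx.say(c:count())      -- 2: items currently stored (possibly including expired ones)
ngx.say(c:capacity())   -- 2: the max_items value given to lrucache.new()
```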
[Back to TOC](#table-of-contents)
get_keys
--------
`syntax: keys = cache:get_keys(max_count?, res?)`
Fetch the list of keys currently inside the cache up to `max_count`. The keys
will be ordered in MRU fashion (Most-Recently-Used keys first).
This function returns a Lua (array) table (with integer keys) containing the
keys.
When `max_count` is `nil` or `0`, all keys (if any) will be returned.
When provided with a `res` table argument, this function will not allocate a
table and will instead insert the keys in `res`, along with a trailing `nil`
value.
This method was added in the `v0.10` release.
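For example, an illustrative sketch (the key names are arbitrary):

```lua
local lrucache = require "resty.lrucache"

local c = lrucache.new(3)
c:set("a", 1)
c:set("b", 2)
c:set("c", 3)

local keys = c:get_keys()       -- { "c", "b", "a" }: most recently used first
for _, key in ipairs(keys) do
    ngx.say(key)
end

-- reuse a preallocated table to avoid the extra allocation
local res = {}
c:get_keys(2, res)              -- res now holds { "c", "b" }, with res[3] == nil
```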
[Back to TOC](#table-of-contents)
flush_all
---------
`syntax: cache:flush_all()`
Flushes all the existing data (if any) in the current cache instance. This is
an `O(1)` operation and should be much faster than creating a brand new cache
instance.
Note however that the `flush_all()` method of `resty.lrucache.pureffi` is an
`O(n)` operation.
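For example, an illustrative sketch on a cache instance `c` created as in the
[Synopsis](#synopsis):

```lua
c:set("dog", 32)
c:set("cat", 56)
ngx.say(c:count())       -- 2

c:flush_all()            -- O(1) for resty.lrucache, O(n) for resty.lrucache.pureffi
ngx.say(c:count())       -- 0
ngx.say((c:get("dog")))  -- nil
```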
[Back to TOC](#table-of-contents)
Prerequisites
=============
* [LuaJIT](http://luajit.org) 2.0+
* [ngx_lua](https://github.com/openresty/lua-nginx-module) 0.8.10+
[Back to TOC](#table-of-contents)
Installation
============
It is recommended to use the latest [OpenResty release](https://openresty.org).
At least OpenResty 1.4.2.9 is required. Recent versions of OpenResty only
support LuaJIT, but if you are using an older version, make sure to enable
LuaJIT when building OpenResty by passing the `--with-luajit` option to its
`./configure` script. No extra Nginx configuration is required.
If you want to use this library with your own Nginx build (with ngx_lua), then
you need to ensure you are using ngx_lua 0.8.10 or greater. When not using an
OpenResty release, you also need to configure the
[lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path)
directive to add the path to your lua-resty-lrucache source tree to ngx_lua's
Lua module search path, as in:
```nginx
# nginx.conf
http {
# only if not using an official OpenResty release
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
...
}
```
and then load the library in Lua:
```lua
local lrucache = require "resty.lrucache"
```
[Back to TOC](#table-of-contents)
Community
=========
[Back to TOC](#table-of-contents)
English Mailing List
--------------------
The [openresty-en](https://groups.google.com/group/openresty-en) mailing list
is for English speakers.
[Back to TOC](#table-of-contents)
Chinese Mailing List
--------------------
The [openresty](https://groups.google.com/group/openresty) mailing list is for
Chinese speakers.
[Back to TOC](#table-of-contents)
Bugs and Patches
================
Please report bugs or submit patches by
1. creating a ticket on the [GitHub Issue
Tracker](https://github.com/openresty/lua-resty-lrucache/issues),
1. or posting to the [OpenResty community](#community).
[Back to TOC](#table-of-contents)
Author
======
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, OpenResty Inc.
Shuxin Yang.
[Back to TOC](#table-of-contents)
Copyright and License
=====================
This module is licensed under the BSD license.
Copyright (C) 2014-2019, by Yichun "agentzh" Zhang, OpenResty Inc.
Copyright (C) 2014-2017, by Shuxin Yang.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Back to TOC](#table-of-contents)
See Also
========
* OpenResty: https://openresty.org
* the ngx_http_lua module: https://github.com/openresty/lua-nginx-module
* the ngx_stream_lua module: https://github.com/openresty/stream-lua-nginx-module
* the lua-resty-core library: https://github.com/openresty/lua-resty-core
[Back to TOC](#table-of-contents)

dist.ini
@@ -0,0 +1,10 @@
name=lua-resty-lrucache
abstract=Lua-land LRU Cache based on LuaJIT FFI
author=Yichun Zhang (agentzh)
is_original=yes
license=2bsd
lib_dir=lib
doc_dir=lib
repo_link=https://github.com/openresty/lua-resty-lrucache
main_module=lib/resty/lrucache.lua
requires = luajit

lib/resty/lrucache.lua
@@ -0,0 +1,342 @@
-- Copyright (C) Yichun Zhang (agentzh)
local ffi = require "ffi"
local ffi_new = ffi.new
local ffi_sizeof = ffi.sizeof
local ffi_cast = ffi.cast
local ffi_fill = ffi.fill
local ngx_now = ngx.now
local uintptr_t = ffi.typeof("uintptr_t")
local setmetatable = setmetatable
local tonumber = tonumber
local type = type
local new_tab
do
local ok
ok, new_tab = pcall(require, "table.new")
if not ok then
new_tab = function(narr, nrec) return {} end
end
end
if string.find(jit.version, " 2.0", 1, true) then
ngx.log(ngx.ALERT, "use of lua-resty-lrucache with LuaJIT 2.0 is ",
"not recommended; use LuaJIT 2.1+ instead")
end
local ok, tb_clear = pcall(require, "table.clear")
if not ok then
local pairs = pairs
tb_clear = function (tab)
for k, _ in pairs(tab) do
tab[k] = nil
end
end
end
-- queue data types
--
-- this queue is a double-ended queue and the first node
-- is reserved for the queue itself.
-- the implementation is mostly borrowed from nginx's ngx_queue_t data
-- structure.
ffi.cdef[[
typedef struct lrucache_queue_s lrucache_queue_t;
struct lrucache_queue_s {
double expire; /* in seconds */
lrucache_queue_t *prev;
lrucache_queue_t *next;
uint32_t user_flags;
};
]]
local queue_arr_type = ffi.typeof("lrucache_queue_t[?]")
local queue_type = ffi.typeof("lrucache_queue_t")
local NULL = ffi.null
-- queue utility functions
local function queue_insert_tail(h, x)
local last = h[0].prev
x.prev = last
last.next = x
x.next = h
h[0].prev = x
end
local function queue_init(size)
if not size then
size = 0
end
local q = ffi_new(queue_arr_type, size + 1)
ffi_fill(q, ffi_sizeof(queue_type, size + 1), 0)
if size == 0 then
q[0].prev = q
q[0].next = q
else
local prev = q[0]
for i = 1, size do
local e = q + i
e.user_flags = 0
prev.next = e
e.prev = prev
prev = e
end
local last = q[size]
last.next = q
q[0].prev = last
end
return q
end
local function queue_is_empty(q)
-- print("q: ", tostring(q), "q.prev: ", tostring(q), ": ", q == q.prev)
return q == q[0].prev
end
local function queue_remove(x)
local prev = x.prev
local next = x.next
next.prev = prev
prev.next = next
-- for debugging purpose only:
x.prev = NULL
x.next = NULL
end
local function queue_insert_head(h, x)
x.next = h[0].next
x.next.prev = x
x.prev = h
h[0].next = x
end
local function queue_last(h)
return h[0].prev
end
local function queue_head(h)
return h[0].next
end
-- true module stuffs
local _M = {
_VERSION = '0.13'
}
local mt = { __index = _M }
local function ptr2num(ptr)
local v = tonumber(ffi_cast(uintptr_t, ptr))
return v
end
function _M.new(size)
if size < 1 then
return nil, "size too small"
end
local self = {
hasht = {},
free_queue = queue_init(size),
cache_queue = queue_init(),
key2node = {},
node2key = {},
num_items = 0,
max_items = size,
}
setmetatable(self, mt)
return self
end
function _M.count(self)
return self.num_items
end
function _M.capacity(self)
return self.max_items
end
function _M.get(self, key)
local hasht = self.hasht
local val = hasht[key]
if val == nil then
return nil
end
local node = self.key2node[key]
-- print(key, ": moving node ", tostring(node), " to cache queue head")
local cache_queue = self.cache_queue
queue_remove(node)
queue_insert_head(cache_queue, node)
if node.expire >= 0 and node.expire < ngx_now() then
-- print("expired: ", node.expire, " > ", ngx_now())
return nil, val, node.user_flags
end
return val, nil, node.user_flags
end
function _M.delete(self, key)
self.hasht[key] = nil
local key2node = self.key2node
local node = key2node[key]
if not node then
return false
end
key2node[key] = nil
self.node2key[ptr2num(node)] = nil
queue_remove(node)
queue_insert_tail(self.free_queue, node)
self.num_items = self.num_items - 1
return true
end
function _M.set(self, key, value, ttl, flags)
local hasht = self.hasht
hasht[key] = value
local key2node = self.key2node
local node = key2node[key]
if not node then
local free_queue = self.free_queue
local node2key = self.node2key
if queue_is_empty(free_queue) then
-- evict the least recently used key
-- assert(not queue_is_empty(self.cache_queue))
node = queue_last(self.cache_queue)
local oldkey = node2key[ptr2num(node)]
-- print(key, ": evicting oldkey: ", oldkey, ", oldnode: ",
-- tostring(node))
if oldkey then
hasht[oldkey] = nil
key2node[oldkey] = nil
end
else
-- take a free queue node
node = queue_head(free_queue)
-- only add count if we are not evicting
self.num_items = self.num_items + 1
-- print(key, ": get a new free node: ", tostring(node))
end
node2key[ptr2num(node)] = key
key2node[key] = node
end
queue_remove(node)
queue_insert_head(self.cache_queue, node)
if ttl then
node.expire = ngx_now() + ttl
else
node.expire = -1
end
if type(flags) == "number" and flags >= 0 then
node.user_flags = flags
else
node.user_flags = 0
end
end
function _M.get_keys(self, max_count, res)
if not max_count or max_count == 0 then
max_count = self.num_items
end
if not res then
res = new_tab(max_count + 1, 0) -- + 1 for trailing hole
end
local cache_queue = self.cache_queue
local node2key = self.node2key
local i = 0
local node = queue_head(cache_queue)
while node ~= cache_queue do
if i >= max_count then
break
end
i = i + 1
res[i] = node2key[ptr2num(node)]
node = node.next
end
res[i + 1] = nil
return res
end
function _M.flush_all(self)
tb_clear(self.hasht)
tb_clear(self.node2key)
tb_clear(self.key2node)
self.num_items = 0
local cache_queue = self.cache_queue
local free_queue = self.free_queue
-- splice the cache_queue into free_queue
if not queue_is_empty(cache_queue) then
local free_head = free_queue[0]
local free_last = free_head.prev
local cache_head = cache_queue[0]
local cache_first = cache_head.next
local cache_last = cache_head.prev
free_last.next = cache_first
cache_first.prev = free_last
cache_last.next = free_head
free_head.prev = cache_last
cache_head.next = cache_queue
cache_head.prev = cache_queue
end
end
return _M

lib/resty/lrucache/pureffi.lua
@@ -0,0 +1,606 @@
-- Copyright (C) Yichun Zhang (agentzh)
-- Copyright (C) Shuxin Yang
--[[
This module implements a key/value cache store. We adopt LRU as our
replace/evict policy. Each key/value pair is tagged with a Time-to-Live (TTL);
from user's perspective, stale pairs are automatically removed from the cache.
Why FFI
-------
In Lua, the expression "table[key] = nil" does not *PHYSICALLY* remove the
value associated with the key; it just sets the value to nil! So the table
keeps growing with a large number of key/nil pairs, which are not purged
until the internal resize() operation is triggered.
This "feature" is terribly ill-suited to what we need. Therefore we have to
rely on FFI to build a hash-table where any entry can be physically deleted
immediately.
Under the hood:
--------------
In concept, we introduce three data structures to implement the cache store:
1. key/value vector for storing keys and values.
2. a queue to mimic the LRU.
3. hash-table for looking up the value for a given key.
Unfortunately, efficiency and clarity usually come at each other's cost. The
data structures we are using are slightly more complicated than what we
described above.
 o. Lua does not have an efficient way to store a vector of pairs. So we use
    two vectors for the key/value pairs: one for keys and the other for values
    (_M.key_v and _M.val_v, respectively), and the i-th key corresponds to the
    i-th value.
A key/value pair is identified by the "id" field in a "node" (we shall
discuss node later)
o. The queue is nothing more than a doubly-linked list of "node" linked via
lrucache_pureffi_queue_s::{next|prev} fields.
o. The hash-table has two parts:
    - _M.bucket_v[], a vector of buckets, indexed by hash value, and
- a bucket is a singly-linked list of "node" via the
lrucache_pureffi_queue_s::conflict field.
A key must be a string, and the hash value of a key is evaluated by:
crc32(key-cast-to-pointer) % size(_M.bucket_v).
We mandate size(_M.bucket_v) being a power-of-two in order to avoid
expensive modulo operation.
At the heart of the module is an array of "node" (of type
lrucache_pureffi_queue_s). A node:
- keeps the meta-data of its corresponding key/value pair
(embodied by the "id", and "expire" field);
- is a part of LRU queue (embodied by "prev" and "next" fields);
- is a part of hash-table (embodied by the "conflict" field).
]]
local ffi = require "ffi"
local bit = require "bit"
local ffi_new = ffi.new
local ffi_sizeof = ffi.sizeof
local ffi_cast = ffi.cast
local ffi_fill = ffi.fill
local ngx_now = ngx.now
local uintptr_t = ffi.typeof("uintptr_t")
local c_str_t = ffi.typeof("const char*")
local int_t = ffi.typeof("int")
local int_array_t = ffi.typeof("int[?]")
local crc_tab = ffi.new("const unsigned int[256]", {
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F,
0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2,
0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9,
0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423,
0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106,
0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D,
0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950,
0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7,
0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA,
0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84,
0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB,
0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E,
0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55,
0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28,
0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F,
0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69,
0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC,
0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693,
0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D });
local setmetatable = setmetatable
local tonumber = tonumber
local tostring = tostring
local type = type
local brshift = bit.rshift
local bxor = bit.bxor
local band = bit.band
local new_tab
do
local ok
ok, new_tab = pcall(require, "table.new")
if not ok then
new_tab = function(narr, nrec) return {} end
end
end
-- queue data types
--
-- this queue is a double-ended queue and the first node
-- is reserved for the queue itself.
-- the implementation is mostly borrowed from nginx's ngx_queue_t data
-- structure.
ffi.cdef[[
/* A lrucache_pureffi_queue_s node hooks together three data structures:
 * o. the key/value store, embodied by the "id" (which is in essence the
 *    identifier of the key/value pair) and the "expire" field (which is
 *    metadata of the corresponding key/value pair).
 * o. The LRU queue, via the prev/next fields.
 * o. The hash-table, embodied by the "conflict" field.
*/
typedef struct lrucache_pureffi_queue_s lrucache_pureffi_queue_t;
struct lrucache_pureffi_queue_s {
    /* Each node is assigned a unique ID at construction time, and the
     * ID remains immutable, regardless of whether the node is in the
     * active list or the free list. The queue header is assigned ID 0.
     * Since the queue header is a sentinel node, 0 denotes "invalid ID".
*
* Intuitively, we can view the "id" as the identifier of key/value
* pair.
*/
int id;
/* The bucket of the hash-table is implemented as a singly-linked list.
* The "conflict" refers to the ID of the next node in the bucket.
*/
int conflict;
uint32_t user_flags;
double expire; /* in seconds */
lrucache_pureffi_queue_t *prev;
lrucache_pureffi_queue_t *next;
};
]]
local queue_arr_type = ffi.typeof("lrucache_pureffi_queue_t[?]")
--local queue_ptr_type = ffi.typeof("lrucache_pureffi_queue_t*")
local queue_type = ffi.typeof("lrucache_pureffi_queue_t")
local NULL = ffi.null
--========================================================================
--
-- Queue utility functions
--
--========================================================================
-- Append the element "x" to the given queue "h".
local function queue_insert_tail(h, x)
local last = h[0].prev
x.prev = last
last.next = x
x.next = h
h[0].prev = x
end
--[[
Allocate a queue with size + 1 elements. Elements are linked together in a
circular way, i.e. the last element's "next" points to the first element,
while the first element's "prev" element points to the last element.
]]
local function queue_init(size)
if not size then
size = 0
end
local q = ffi_new(queue_arr_type, size + 1)
ffi_fill(q, ffi_sizeof(queue_type, size + 1), 0)
if size == 0 then
q[0].prev = q
q[0].next = q
else
local prev = q[0]
for i = 1, size do
local e = q[i]
e.id = i
e.user_flags = 0
prev.next = e
e.prev = prev
prev = e
end
local last = q[size]
last.next = q
q[0].prev = last
end
return q
end
local function queue_is_empty(q)
-- print("q: ", tostring(q), "q.prev: ", tostring(q), ": ", q == q.prev)
return q == q[0].prev
end
local function queue_remove(x)
local prev = x.prev
local next = x.next
next.prev = prev
prev.next = next
-- for debugging purpose only:
x.prev = NULL
x.next = NULL
end
-- Insert the element "x" to the head of the given queue "h"
local function queue_insert_head(h, x)
x.next = h[0].next
x.next.prev = x
x.prev = h
h[0].next = x
end
local function queue_last(h)
return h[0].prev
end
local function queue_head(h)
return h[0].next
end
--========================================================================
--
-- Miscellaneous Utility Functions
--
--========================================================================
local function ptr2num(ptr)
return tonumber(ffi_cast(uintptr_t, ptr))
end
local function crc32_ptr(ptr)
local p = brshift(ptr2num(ptr), 3)
local b = band(p, 255)
local crc32 = crc_tab[b]
b = band(brshift(p, 8), 255)
crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
b = band(brshift(p, 16), 255)
crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
--b = band(brshift(p, 24), 255)
--crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
return crc32
end
--========================================================================
--
-- Implementation of "export" functions
--
--========================================================================
local _M = {
_VERSION = '0.13'
}
local mt = { __index = _M }
-- "size" specifies the maximum number of entries in the LRU queue, and the
-- "load_factor" designates the 'load factor' of the hash-table we are using
-- internally. The default value of load-factor is 0.5 (i.e. 50%); if the
-- load-factor is specified, it will be clamped to the range of [0.1, 1](i.e.
-- if load-factor is greater than 1, it will be saturated to 1, likewise,
-- if load-factor is smaller than 0.1, it will be clamped to 0.1).
function _M.new(size, load_factor)
if size < 1 then
return nil, "size too small"
end
-- Determine bucket size, which must be power of two.
local load_f = load_factor
if not load_factor then
load_f = 0.5
elseif load_factor > 1 then
load_f = 1
elseif load_factor < 0.1 then
load_f = 0.1
end
local bs_min = size / load_f
-- The bucket_sz *MUST* be a power-of-two. See the hash_string().
local bucket_sz = 1
repeat
bucket_sz = bucket_sz * 2
until bucket_sz >= bs_min
local self = {
size = size,
bucket_sz = bucket_sz,
free_queue = queue_init(size),
cache_queue = queue_init(0),
node_v = nil,
key_v = new_tab(size, 0),
val_v = new_tab(size, 0),
bucket_v = ffi_new(int_array_t, bucket_sz),
num_items = 0,
}
-- "node_v" is an array of all the nodes used in the LRU queue. Exprpession
-- node_v[i] evaluates to the element of ID "i".
self.node_v = self.free_queue
-- Allocate the array-part of the key_v, val_v, bucket_v.
--local key_v = self.key_v
--local val_v = self.val_v
--local bucket_v = self.bucket_v
ffi_fill(self.bucket_v, ffi_sizeof(int_t, bucket_sz), 0)
return setmetatable(self, mt)
end
function _M.count(self)
return self.num_items
end
function _M.capacity(self)
return self.size
end
local function hash_string(self, str)
local c_str = ffi_cast(c_str_t, str)
local hv = crc32_ptr(c_str)
hv = band(hv, self.bucket_sz - 1)
-- Hint: bucket is 0-based
return hv
end
-- Search for the node associated with the key in the bucket; if found, return
-- the id of the node and the id of its previous node in the conflict list.
-- The "bucket_hdr_id" is the ID of the first node in the bucket
local function _find_node_in_bucket(key, key_v, node_v, bucket_hdr_id)
if bucket_hdr_id ~= 0 then
local prev = 0
local cur = bucket_hdr_id
while cur ~= 0 and key_v[cur] ~= key do
prev = cur
cur = node_v[cur].conflict
end
if cur ~= 0 then
return cur, prev
end
end
end
-- Return the node corresponding to the key/val.
local function find_key(self, key)
local key_hash = hash_string(self, key)
return _find_node_in_bucket(key, self.key_v, self.node_v,
self.bucket_v[key_hash])
end
--[[ This function tries to
1. Remove the given key and the associated value from the key/value store,
2. Remove the entry associated with the key from the hash-table.
NOTE: all queues remain intact.
If there was a node bound to the key/val, return that node; otherwise,
nil is returned.
]]
local function remove_key(self, key)
local key_v = self.key_v
local val_v = self.val_v
local node_v = self.node_v
local bucket_v = self.bucket_v
local key_hash = hash_string(self, key)
local cur, prev =
_find_node_in_bucket(key, key_v, node_v, bucket_v[key_hash])
if cur then
        -- Clear the key and value slots so they can be garbage collected.
key_v[cur] = nil
val_v[cur] = nil
self.num_items = self.num_items - 1
-- Remove the node from the hash table
local next_node = node_v[cur].conflict
if prev ~= 0 then
node_v[prev].conflict = next_node
else
bucket_v[key_hash] = next_node
end
node_v[cur].conflict = 0
return cur
end
end
--[[ Bind the key/val with the given node, and insert the node into the hash-table.
NOTE: this function does not touch any queue.
]]
local function insert_key(self, key, val, node)
-- Bind the key/val with the node
local node_id = node.id
self.key_v[node_id] = key
self.val_v[node_id] = val
-- Insert the node into the hash-table
local key_hash = hash_string(self, key)
local bucket_v = self.bucket_v
node.conflict = bucket_v[key_hash]
bucket_v[key_hash] = node_id
self.num_items = self.num_items + 1
end
function _M.get(self, key)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = find_key(self, key)
if not node_id then
return nil
end
-- print(key, ": moving node ", tostring(node), " to cache queue head")
local cache_queue = self.cache_queue
local node = self.node_v + node_id
queue_remove(node)
queue_insert_head(cache_queue, node)
local expire = node.expire
if expire >= 0 and expire < ngx_now() then
-- print("expired: ", node.expire, " > ", ngx_now())
return nil, self.val_v[node_id], node.user_flags
end
return self.val_v[node_id], nil, node.user_flags
end
function _M.delete(self, key)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = remove_key(self, key);
if not node_id then
return false
end
local node = self.node_v + node_id
queue_remove(node)
queue_insert_tail(self.free_queue, node)
return true
end
function _M.set(self, key, value, ttl, flags)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = find_key(self, key)
local node
if not node_id then
local free_queue = self.free_queue
if queue_is_empty(free_queue) then
-- evict the least recently used key
-- assert(not queue_is_empty(self.cache_queue))
node = queue_last(self.cache_queue)
remove_key(self, self.key_v[node.id])
else
-- take a free queue node
node = queue_head(free_queue)
-- print(key, ": get a new free node: ", tostring(node))
end
-- insert the key
insert_key(self, key, value, node)
else
node = self.node_v + node_id
self.val_v[node_id] = value
end
queue_remove(node)
queue_insert_head(self.cache_queue, node)
if ttl then
node.expire = ngx_now() + ttl
else
node.expire = -1
end
if type(flags) == "number" and flags >= 0 then
node.user_flags = flags
else
node.user_flags = 0
end
end
function _M.get_keys(self, max_count, res)
if not max_count or max_count == 0 then
max_count = self.num_items
end
if not res then
res = new_tab(max_count + 1, 0) -- + 1 for trailing hole
end
local cache_queue = self.cache_queue
local key_v = self.key_v
local i = 0
local node = queue_head(cache_queue)
while node ~= cache_queue do
if i >= max_count then
break
end
i = i + 1
res[i] = key_v[node.id]
node = node.next
end
res[i + 1] = nil
return res
end
function _M.flush_all(self)
local cache_queue = self.cache_queue
local key_v = self.key_v
local node = queue_head(cache_queue)
while node ~= cache_queue do
local key = key_v[node.id]
node = node.next
_M.delete(self, key)
end
end
return _M

t/001-sanity.t
@@ -0,0 +1,198 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:delete("dog")
c:delete("cat")
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
';
}
--- response_body
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
=== TEST 2: evict existing items
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
            local c, err = lrucache.new(2)
            if not c then
                ngx.say("failed to init lrucache: ", err)
return
end
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("bird", 76)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
ngx.say("bird: ", (c:get("bird")))
';
}
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
=== TEST 3: evict existing items (reordered, get should also count)
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
            local c, err = lrucache.new(2)
            if not c then
                ngx.say("failed to init lrucache: ", err)
return
end
c:set("cat", 56)
c:set("dog", 32)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("bird", 76)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
ngx.say("bird: ", (c:get("bird")))
';
}
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
=== TEST 4: ttl
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
c:set("dog", 32, 0.5)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.25)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.26)
local v, err = c:get("dog")
ngx.say("dog: ", v, " ", err)
';
}
--- response_body
dog: 32
dog: 32
dog: nil 32
=== TEST 5: ttl
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local lim = 5
local c = lrucache.new(lim)
local n = 1000
for i = 1, n do
c:set("dog" .. i, i)
c:delete("dog" .. i)
c:set("dog" .. i, i)
local cnt = 0
for k, v in pairs(c.hasht) do
cnt = cnt + 1
end
assert(cnt <= lim)
end
for i = 1, n do
local key = "dog" .. math.random(1, n)
c:get(key)
end
for i = 1, n do
local key = "dog" .. math.random(1, n)
c:get(key)
c:set("dog" .. i, i)
local cnt = 0
for k, v in pairs(c.hasht) do
cnt = cnt + 1
end
assert(cnt <= lim)
end
ngx.say("ok")
';
}
--- response_body
ok
--- timeout: 20
=== TEST 6: replace value
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
c:set("dog", 32)
ngx.say("dog: ", (c:get("dog")))
c:set("dog", 33)
ngx.say("dog: ", (c:get("dog")))
';
}
--- response_body
dog: 32
dog: 33

@@ -0,0 +1,32 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: should-store-false
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("false-value", false)
ngx.say("false-value: ", (c:get("false-value")))
c:delete("false-value")
ngx.say("false-value: ", (c:get("false-value")))
';
}
--- response_body
false-value: false
false-value: nil

t/003-init-by-lua.t
@@ -0,0 +1,94 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(1);
plan tests => repeat_each() * (blocks() * 2);
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval
"$t::TestLRUCache::HttpConfig"
. qq!
init_by_lua '
local function log(...)
print("[cache] ", ...)
end
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
c:delete("dog")
c:delete("cat")
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
';
!
--- config
location = /t {
return 200;
}
--- ignore_response
--- error_log
--- grep_error_log eval: qr/\[cache\] .*? (?:\d+|nil)/
--- grep_error_log_out
[cache] dog: 32
[cache] cat: 56
[cache] dog: 32
[cache] cat: 56
[cache] dog: nil
[cache] cat: nil
=== TEST 2: sanity
--- http_config eval
"$t::TestLRUCache::HttpConfig"
. qq!
init_by_lua '
lrucache = require "resty.lrucache"
flv_index, err = lrucache.new(200)
if not flv_index then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_meta, err = lrucache.new(200)
if not flv_meta then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_channel, err = lrucache.new(200)
if not flv_channel then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
print("3 lrucache initialized.")
';
!
--- config
location = /t {
return 200;
}
--- ignore_response
--- error_log
3 lrucache initialized.

t/004-flush-all.t
@@ -0,0 +1,152 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: flush_all() deletes all keys (cache partial occupied)
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local N = 4
local c = lrucache.new(N)
for i = 1, N / 2 do
c:set("key " .. i, i)
end
c:flush_all()
for i = 1, N do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
1: nil
2: nil
3: nil
4: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5
=== TEST 2: flush_all() deletes all keys (cache fully occupied)
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local N = 4
local c = lrucache.new(N)
for i = 1, N + 1 do
c:set("key " .. i, i)
end
ngx.say(c:count())
c:flush_all()
ngx.say(c:count())
for i = 1, N + 1 do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
4
0
1: nil
2: nil
3: nil
4: nil
5: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5
=== TEST 3: flush_all() flush empty cache store
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local N = 4
local c = lrucache.new(4)
c:flush_all()
for i = 1, N do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
1: nil
2: nil
3: nil
4: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5

t/005-capacity.t
@@ -0,0 +1,25 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: capacity() returns total cache capacity
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
ngx.say("capacity: ", c:capacity())
}
}
--- response_body
capacity: 2

t/006-count.t
@@ -0,0 +1,53 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: count() returns current cache size
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
ngx.say("count: ", c:count())
c:set("dog", 32)
ngx.say("count: ", c:count())
c:set("dog", 33)
ngx.say("count: ", c:count())
c:set("cat", 33)
ngx.say("count: ", c:count())
c:set("pig", 33)
ngx.say("count: ", c:count())
c:delete("dog")
ngx.say("count: ", c:count())
c:delete("pig")
ngx.say("count: ", c:count())
c:delete("cat")
ngx.say("count: ", c:count())
}
}
--- response_body
count: 0
count: 1
count: 1
count: 2
count: 2
count: 2
count: 1
count: 0

t/007-get-keys.t
@@ -0,0 +1,234 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: get_keys() with some keys
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(100)
c:set("hello", true)
c:set("world", false)
local keys = c:get_keys()
ngx.say("size: ", #keys)
for i = 1, #keys do
ngx.say(keys[i])
end
}
}
--- response_body
size: 2
world
hello
=== TEST 2: get_keys() with no keys
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(100)
local keys = c:get_keys()
ngx.say("size: ", #keys)
for i = 1, #keys do
ngx.say(keys[i])
end
}
}
--- response_body
size: 0
=== TEST 3: get_keys() with full cache
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
c:set("extra-key", true)
local keys = c:get_keys()
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("LRU: ", keys[#keys])
}
}
--- response_body
size: 100
MRU: extra-key
LRU: key-2
=== TEST 4: get_keys() max_count = 5
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
local keys = c:get_keys(5)
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("latest: ", keys[#keys])
}
}
--- response_body
size: 5
MRU: key-100
latest: key-96
=== TEST 5: get_keys() max_count = 0 disables max returns
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
local keys = c:get_keys(0)
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("LRU: ", keys[#keys])
}
}
--- response_body
size: 100
MRU: key-100
LRU: key-1
=== TEST 6: get_keys() user-fed res table
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c1 = lrucache.new(3)
local c2 = lrucache.new(2)
for i = 1, 3 do
c1:set("c1-" .. i, true)
end
for i = 1, 2 do
c2:set("c2-" .. i, true)
end
local res = {}
local keys_1 = c1:get_keys(0, res)
ngx.say("res is user-fed: ", keys_1 == res)
for _, k in ipairs(keys_1) do
ngx.say(k)
end
res = {}
local keys_2 = c2:get_keys(0, res)
for _, k in ipairs(keys_2) do
ngx.say(k)
end
}
}
--- response_body
res is user-fed: true
c1-3
c1-2
c1-1
c2-2
c2-1
=== TEST 7: get_keys() user-fed res table + max_count
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c1 = lrucache.new(3)
for i = 1, 3 do
c1:set("key-" .. i, true)
end
local res = {}
local keys = c1:get_keys(2, res)
for _, k in ipairs(keys) do
ngx.say(k)
end
}
}
--- response_body
key-3
key-2
=== TEST 8: get_keys() user-fed res table gets a trailing hole
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c1 = lrucache.new(3)
for i = 1, 3 do
c1:set("key-" .. i, true)
end
local res = {}
for i = 1, 10 do
res[i] = true
end
local keys = c1:get_keys(2, res)
for _, k in ipairs(keys) do
ngx.say(k)
end
}
}
--- response_body
key-3
key-2

t/008-user-flags.t
@@ -0,0 +1,175 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: no user flags by default
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
c:set("dog", 32)
c:set("cat", 56)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 0
cat: 56 nil 0
=== TEST 2: stores user flags if specified
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
c:set("dog", 32, nil, 0x01)
c:set("cat", 56, nil, 0x02)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 1
cat: 56 nil 2
=== TEST 3: user flags cannot be negative
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(3)
c:set("dog", 32, nil, 0)
c:set("cat", 56, nil, -1)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 0
cat: 56 nil 0
=== TEST 4: user flags not number is ignored
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
c:set("dog", 32, nil, "")
local v, err, flags = c:get("dog")
ngx.say(v, " ", err, " ", flags)
}
}
--- response_body
32 nil 0
=== TEST 5: all nodes from double-ended queue have flags
--- config
location = /t {
content_by_lua_block {
local len = 10
local lrucache = require "resty.lrucache"
local c = lrucache.new(len)
for i = 1, len do
c:set(i, 32, nil, 1)
end
for i = 1, len do
local v, _, flags = c:get(i)
if not flags then
ngx.say("item ", i, " does not have flags")
return
end
end
ngx.say("ok")
}
}
--- response_body
ok
=== TEST 6: user flags are preserved when item is stale
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
c:set("dogs", 32, 0.2, 3)
ngx.sleep(0.21)
local v, err, flags = c:get("dogs")
ngx.say(v, " ", err, " ", flags)
}
}
--- response_body
nil 32 3
=== TEST 7: user flags are not preserved upon eviction
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
for i = 1, 10 do
local flags = i % 2 == 0 and i
c:set(i, true, nil, flags)
local v, err, flags = c:get(i)
ngx.say(v, " ", err, " ", flags)
end
}
}
--- response_body
true nil 0
true nil 2
true nil 0
true nil 4
true nil 0
true nil 6
true nil 0
true nil 8
true nil 0
true nil 10

t/100-pureffi/001-sanity.t
@@ -0,0 +1,307 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:delete("dog")
c:delete("cat")
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
';
}
--- response_body
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
=== TEST 2: evict existing items
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
            local c, err = lrucache.new(2)
            if not c then
                ngx.say("failed to init lrucache: ", err)
return
end
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("bird", 76)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
ngx.say("bird: ", (c:get("bird")))
';
}
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
=== TEST 3: evict existing items (reordered, get should also count)
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
            local c, err = lrucache.new(2)
            if not c then
                ngx.say("failed to init lrucache: ", err)
return
end
c:set("cat", 56)
c:set("dog", 32)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
c:set("bird", 76)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
ngx.say("bird: ", (c:get("bird")))
';
}
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
=== TEST 4: ttl
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 0.6)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.3)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.31)
local v, err = c:get("dog")
ngx.say("dog: ", v, " ", err)
';
}
--- response_body
dog: 32
dog: 32
dog: nil 32
=== TEST 5: load factor
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1, 0.25)
ngx.say(c.bucket_sz)
';
}
--- response_body
4
=== TEST 6: load factor clamped to 0.1
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(3, 0.05)
ngx.say(c.bucket_sz)
';
}
--- response_body
32
=== TEST 7: load factor saturated to 1
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(3, 2.1)
ngx.say(c.bucket_sz)
';
}
--- response_body
4
=== TEST 8: non-string keys
--- config
location = /t {
content_by_lua '
local function log(...)
ngx.say(...)
end
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
local tab1 = {1, 2}
local tab2 = {3, 4}
c:set(tab1, 32)
c:set(tab2, 56)
log("tab1: ", (c:get(tab1)))
log("tab2: ", (c:get(tab2)))
c:set(tab1, 32)
c:set(tab2, 56)
log("tab1: ", (c:get(tab1)))
log("tab2: ", (c:get(tab2)))
c:delete(tab1)
c:delete(tab2)
log("tab1: ", (c:get(tab1)))
log("tab2: ", (c:get(tab2)))
';
}
--- response_body
tab1: 32
tab2: 56
tab1: 32
tab2: 56
tab1: nil
tab2: nil
=== TEST 9: replace value
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32)
ngx.say("dog: ", (c:get("dog")))
c:set("dog", 33)
ngx.say("dog: ", (c:get("dog")))
';
}
--- response_body
dog: 32
dog: 33
=== TEST 10: replace value 2
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 1.0)
ngx.say("dog: ", (c:get("dog")))
c:set("dog", 33, 0.3)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.4)
local v, err = c:get("dog")
ngx.say("dog: ", v, " ", err)
';
}
--- response_body
dog: 32
dog: 33
dog: nil 33
=== TEST 11: replace value 3 (the old value has longer expire time)
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 1.2)
c:set("dog", 33, 0.6)
ngx.sleep(0.2)
ngx.say("dog: ", (c:get("dog")))
ngx.sleep(0.5)
local v, err = c:get("dog")
ngx.say("dog: ", v, " ", err)
';
}
--- response_body
dog: 33
dog: nil 33
=== TEST 12: replace value 4
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 0.1)
ngx.sleep(0.2)
c:set("dog", 33)
ngx.sleep(0.2)
ngx.say("dog: ", (c:get("dog")))
';
}
--- response_body
dog: 33

@ -0,0 +1,32 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: should-store-false
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
c:set("false-value", false)
ngx.say("false-value: ", (c:get("false-value")))
c:delete("false-value")
ngx.say("false-value: ", (c:get("false-value")))
';
}
--- response_body
false-value: false
false-value: nil

@ -0,0 +1,94 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(1);
plan tests => repeat_each() * (blocks() * 2);
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval
"$t::TestLRUCache::HttpConfig"
. qq!
init_by_lua '
local function log(...)
print("[cache] ", ...)
end
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
c:delete("dog")
c:delete("cat")
log("dog: ", (c:get("dog")))
log("cat: ", (c:get("cat")))
';
!
--- config
location = /t {
return 200;
}
--- ignore_response
--- error_log
--- grep_error_log eval: qr/\[cache\] .*? (?:\d+|nil)/
--- grep_error_log_out
[cache] dog: 32
[cache] cat: 56
[cache] dog: 32
[cache] cat: 56
[cache] dog: nil
[cache] cat: nil
=== TEST 2: sanity
--- http_config eval
"$t::TestLRUCache::HttpConfig"
. qq!
init_by_lua '
lrucache = require "resty.lrucache.pureffi"
flv_index, err = lrucache.new(200)
if not flv_index then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_meta, err = lrucache.new(200)
if not flv_meta then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_channel, err = lrucache.new(200)
if not flv_channel then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
print("3 lrucache initialized.")
';
!
--- config
location = /t {
return 200;
}
--- ignore_response
--- error_log
3 lrucache initialized.

@ -0,0 +1,152 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: flush_all() deletes all keys (cache partially occupied)
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local N = 4
local c = lrucache.new(N)
for i = 1, N / 2 do
c:set("key " .. i, i)
end
c:flush_all()
for i = 1, N do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
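-- N + 1 keys were set into a capacity-N cache, so "key 1" is presumably evicted as the LRU entry, hence "1: nil" below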
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
1: nil
2: nil
3: nil
4: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5
=== TEST 2: flush_all() deletes all keys (cache fully occupied)
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local N = 4
local c = lrucache.new(N)
for i = 1, N + 1 do
c:set("key " .. i, i)
end
ngx.say(c:count())
c:flush_all()
ngx.say(c:count())
for i = 1, N + 1 do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
4
0
1: nil
2: nil
3: nil
4: nil
5: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5
=== TEST 3: flush_all() flushes an empty cache store
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local N = 4
local c = lrucache.new(N)
c:flush_all()
for i = 1, N do
local key = "key " .. i
local v = c:get(key)
ngx.say(i, ": ", v)
end
ngx.say("++")
for i = 1, N + 1 do
c:set("key " .. i, i)
end
for i = 1, N + 1 do
ngx.say(i, ": ", (c:get("key " .. i)))
end
}
}
--- response_body
1: nil
2: nil
3: nil
4: nil
++
1: nil
2: 2
3: 3
4: 4
5: 5

@ -0,0 +1,25 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: capacity() returns total cache capacity
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
ngx.say("capacity: ", c:capacity())
}
}
--- response_body
capacity: 2

53
t/100-pureffi/006-count.t Normal file
@ -0,0 +1,53 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: count() returns current cache size
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
ngx.say("count: ", c:count())
c:set("dog", 32)
ngx.say("count: ", c:count())
c:set("dog", 33)
ngx.say("count: ", c:count())
c:set("cat", 33)
ngx.say("count: ", c:count())
c:set("pig", 33)
ngx.say("count: ", c:count())
c:delete("dog")
ngx.say("count: ", c:count())
c:delete("pig")
ngx.say("count: ", c:count())
c:delete("cat")
ngx.say("count: ", c:count())
}
}
--- response_body
count: 0
count: 1
count: 1
count: 2
count: 2
count: 2
count: 1
count: 0

@ -0,0 +1,234 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: get_keys() with some keys
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(100)
c:set("hello", true)
c:set("world", false)
local keys = c:get_keys()
ngx.say("size: ", #keys)
for i = 1, #keys do
ngx.say(keys[i])
end
}
}
--- response_body
size: 2
world
hello
=== TEST 2: get_keys() with no keys
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(100)
local keys = c:get_keys()
ngx.say("size: ", #keys)
for i = 1, #keys do
ngx.say(keys[i])
end
}
}
--- response_body
size: 0
=== TEST 3: get_keys() with full cache
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
c:set("extra-key", true)
local keys = c:get_keys()
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("LRU: ", keys[#keys])
}
}
--- response_body
size: 100
MRU: extra-key
LRU: key-2
=== TEST 4: get_keys() max_count = 5
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
local keys = c:get_keys(5)
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("latest: ", keys[#keys])
}
}
--- response_body
size: 5
MRU: key-100
latest: key-96
=== TEST 5: get_keys() max_count = 0 disables max returns
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(100)
for i = 1, 100 do
c:set("key-" .. i, true)
end
local keys = c:get_keys(0)
ngx.say("size: ", #keys)
ngx.say("MRU: ", keys[1])
ngx.say("LRU: ", keys[#keys])
}
}
--- response_body
size: 100
MRU: key-100
LRU: key-1
=== TEST 6: get_keys() user-fed res table
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c1 = lrucache.new(3)
local c2 = lrucache.new(2)
for i = 1, 3 do
c1:set("c1-" .. i, true)
end
for i = 1, 2 do
c2:set("c2-" .. i, true)
end
local res = {}
local keys_1 = c1:get_keys(0, res)
ngx.say("res is user-fed: ", keys_1 == res)
for _, k in ipairs(keys_1) do
ngx.say(k)
end
res = {}
local keys_2 = c2:get_keys(0, res)
for _, k in ipairs(keys_2) do
ngx.say(k)
end
}
}
--- response_body
res is user-fed: true
c1-3
c1-2
c1-1
c2-2
c2-1
=== TEST 7: get_keys() user-fed res table + max_count
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c1 = lrucache.new(3)
for i = 1, 3 do
c1:set("key-" .. i, true)
end
local res = {}
local keys = c1:get_keys(2, res)
for _, k in ipairs(keys) do
ngx.say(k)
end
}
}
--- response_body
key-3
key-2
=== TEST 8: get_keys() user-fed res table gets a trailing hole
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c1 = lrucache.new(3)
for i = 1, 3 do
c1:set("key-" .. i, true)
end
local res = {}
for i = 1, 10 do
res[i] = true
end
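-- get_keys() is expected to write nil right after the last returned key, so ipairs() stops at "key-2" despite the pre-filled slots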
local keys = c1:get_keys(2, res)
for _, k in ipairs(keys) do
ngx.say(k)
end
}
}
--- response_body
key-3
key-2

@ -0,0 +1,175 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: no user flags by default
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
c:set("dog", 32)
c:set("cat", 56)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 0
cat: 56 nil 0
=== TEST 2: stores user flags if specified
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
c:set("dog", 32, nil, 0x01)
c:set("cat", 56, nil, 0x02)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 1
cat: 56 nil 2
=== TEST 3: user flags cannot be negative
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(3)
c:set("dog", 32, nil, 0)
c:set("cat", 56, nil, -1)
local v, err, flags = c:get("dog")
ngx.say("dog: ", v, " ", err, " ", flags)
local v, err, flags = c:get("cat")
ngx.say("cat: ", v, " ", err, " ", flags)
}
}
--- response_body
dog: 32 nil 0
cat: 56 nil 0
=== TEST 4: non-number user flags are ignored
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
c:set("dog", 32, nil, "")
local v, err, flags = c:get("dog")
ngx.say(v, " ", err, " ", flags)
}
}
--- response_body
32 nil 0
=== TEST 5: all nodes from double-ended queue have flags
--- config
location = /t {
content_by_lua_block {
local len = 10
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(len)
for i = 1, len do
c:set(i, 32, nil, 1)
end
for i = 1, len do
local v, _, flags = c:get(i)
if not flags then
ngx.say("item ", i, " does not have flags")
return
end
end
ngx.say("ok")
}
}
--- response_body
ok
=== TEST 6: user flags are preserved when item is stale
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dogs", 32, 0.2, 3)
ngx.sleep(0.21)
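-- the entry is now stale; get() appears to return nil, the stale value and the original flags (3)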
local v, err, flags = c:get("dogs")
ngx.say(v, " ", err, " ", flags)
}
}
--- response_body
nil 32 3
=== TEST 7: user flags are not preserved upon eviction
--- config
location = /t {
content_by_lua_block {
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
for i = 1, 10 do
local flags = i % 2 == 0 and i
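-- flags is false for odd i; non-number flags are presumably stored as 0 (see TEST 4), giving the alternating 0/i pattern below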
c:set(i, true, nil, flags)
local v, err, flags = c:get(i)
ngx.say(v, " ", err, " ", flags)
end
}
}
--- response_body
true nil 0
true nil 2
true nil 0
true nil 4
true nil 0
true nil 6
true nil 0
true nil 8
true nil 0
true nil 10

52
t/101-mixed.t Normal file
@ -0,0 +1,52 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use lib '.';
use t::TestLRUCache;
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
local lrucache = require "resty.lrucache.pureffi"
local c2 = lrucache.new(2)
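-- c and c2 are presumably independent stores, so keys written to one are not visible in the other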
ngx.say("dog: ", (c2:get("dog")))
ngx.say("cat: ", (c2:get("cat")))
c2:set("dog", 9)
c2:set("cat", "hi")
ngx.say("dog: ", (c2:get("dog")))
ngx.say("cat: ", (c2:get("cat")))
ngx.say("dog: ", (c:get("dog")))
ngx.say("cat: ", (c:get("cat")))
';
}
--- response_body
dog: 32
cat: 56
dog: nil
cat: nil
dog: 9
cat: hi
dog: 32
cat: 56

38
t/TestLRUCache.pm Normal file
@ -0,0 +1,38 @@
package t::TestLRUCache;
use Test::Nginx::Socket::Lua -Base;
use Cwd qw(cwd);
$ENV{TEST_NGINX_HOTLOOP} ||= 10;
our $pwd = cwd();
our $lua_package_path = './lib/?.lua;;';
our $HttpConfig = <<_EOC_;
lua_package_path '$lua_package_path';
_EOC_
our @EXPORT = qw(
$pwd
$lua_package_path
$HttpConfig
);
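# fill in common defaults so individual test blocks can omit http_config, request and no_error_log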
add_block_preprocessor(sub {
my $block = shift;
if (!defined $block->http_config) {
$block->set_value("http_config", $HttpConfig);
}
if (!defined $block->request) {
$block->set_value("request", "GET /t");
}
if (!defined $block->no_error_log) {
$block->set_value("no_error_log", "[error]");
}
});
1;

70
valgrind.suppress Normal file
@ -0,0 +1,70 @@
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_add_event
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:index
fun:expand_dynamic_string_token
fun:_dl_map_object
fun:map_doit
fun:_dl_catch_error
fun:do_preload
fun:dl_main
fun:_dl_sysdep_start
fun:_dl_start
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_init
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_notify_init
fun:ngx_epoll_init
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_test_rdhup
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:ngx_alloc
fun:ngx_set_environment
fun:ngx_single_process_cycle
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:ngx_alloc
fun:ngx_set_environment
fun:ngx_worker_process_init
fun:ngx_worker_process_cycle
}