mirror of https://github.com/pypa/pip

Bundle CacheControl and lockfile

commit 077fa14641 (parent b0b05737a9)
pip/_vendor/README.rst
@@ -11,6 +11,7 @@ Modifications
* html5lib has been modified to import six from pip._vendor
* pkg_resources has been modified to import _markerlib from pip._vendor
* markerlib has been modified to import its API from pip._vendor
* CacheControl has been modified to import its dependencies from pip._vendor


Markerlib and pkg_resources
pip/_vendor/cachecontrol/__init__.py
@@ -0,0 +1,7 @@
"""CacheControl import Interface.

Make it easy to import from cachecontrol without long namespaces.
"""
from .wrapper import CacheControl
from .adapter import CacheControlAdapter
from .controller import CacheController
pip/_vendor/cachecontrol/adapter.py
@@ -0,0 +1,87 @@
import functools

from pip._vendor.requests.adapters import HTTPAdapter

from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper


class CacheControlAdapter(HTTPAdapter):
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None, cache_etags=True, controller_class=None,
                 serializer=None, *args, **kw):
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()

        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            cached_response = self.controller.cached_request(request)
            if cached_response:
                return self.build_response(request, cached_response, from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(self.controller.conditional_headers(request))

        resp = super(CacheControlAdapter, self).send(request, **kw)

        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response.
        """
        if not from_cache and request.method == 'GET':
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                response = cached_response
            else:
                # Wrap the response file with a wrapper that will cache the
                # response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the response a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp
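
For illustration, a minimal sketch of mounting this adapter directly on a
session. This assumes a plain standalone cachecontrol/requests install
rather than pip's vendored import paths, and uses the bundled in-memory
DictCache:

    import requests
    from cachecontrol.adapter import CacheControlAdapter
    from cachecontrol.cache import DictCache

    sess = requests.Session()
    # Route all http/https traffic through the caching adapter.
    adapter = CacheControlAdapter(cache=DictCache())
    sess.mount('http://', adapter)
    sess.mount('https://', adapter)

    resp = sess.get('http://example.com/')
    print(resp.from_cache)  # False on the first fetch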
pip/_vendor/cachecontrol/cache.py
@@ -0,0 +1,36 @@
"""
The cache object API for implementing caches. The default is just a
dictionary, which in turn means it is not threadsafe for writing.
"""
from threading import Lock


class BaseCache(object):

    def get(self, key):
        raise NotImplementedError()

    def set(self, key, value):
        raise NotImplementedError()

    def delete(self, key):
        raise NotImplementedError()


class DictCache(BaseCache):

    def __init__(self, init_dict=None):
        self.lock = Lock()
        self.data = init_dict or {}

    def get(self, key):
        return self.data.get(key, None)

    def set(self, key, value):
        with self.lock:
            self.data.update({key: value})

    def delete(self, key):
        with self.lock:
            if key in self.data:
                self.data.pop(key)
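
Any object exposing get/set/delete can serve as a backend for the adapter
and controller above. A minimal sketch of a custom backend (LoggingDictCache
is a hypothetical name, shown only to illustrate the interface):

    from cachecontrol.cache import BaseCache

    class LoggingDictCache(BaseCache):
        """Toy backend that counts every cache write."""
        def __init__(self):
            self.data = {}
            self.writes = 0

        def get(self, key):
            return self.data.get(key)

        def set(self, key, value):
            self.writes += 1
            self.data[key] = value

        def delete(self, key):
            self.data.pop(key, None)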
pip/_vendor/cachecontrol/caches/__init__.py
@@ -0,0 +1,18 @@
from textwrap import dedent

try:
    from .file_cache import FileCache
except ImportError:
    notice = dedent('''
    NOTE: In order to use the FileCache you must have
    lockfile installed. You can install it via pip:
      pip install lockfile
    ''')
    print(notice)


try:
    import redis
    from .redis_cache import RedisCache
except ImportError:
    pass
pip/_vendor/cachecontrol/caches/file_cache.py
@@ -0,0 +1,89 @@
import hashlib
import os

from pip._vendor.lockfile import FileLock


def _secure_open_write(filename, fmode):
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to opening
        pass

    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, an error
    # will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # An error occurred wrapping our FD in a file object
        os.close(fd)
        raise


class FileCache(object):
    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700):
        self.directory = directory
        self.forever = forever
        self.filemode = filemode
        self.dirmode = dirmode

    @staticmethod
    def encode(x):
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        hashed = self.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key):
        name = self._fn(key)
        if not os.path.exists(name):
            return None

        with open(name, 'rb') as fh:
            return fh.read()

    def set(self, key, value):
        name = self._fn(key)

        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            pass

        with FileLock(name) as lock:
            # Write our actual file
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        name = self._fn(key)
        if not self.forever:
            os.remove(name)
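
A quick sketch of how FileCache shards keys on disk: _fn turns each key
into five single-character subdirectories taken from the SHA-224 digest,
followed by the full digest as the file name. Assuming the standalone
cachecontrol and lockfile packages and a writable directory:

    from cachecontrol.caches import FileCache

    fc = FileCache('.webcache')
    fc.set('http://example.com/', b'cached body')
    print(fc.get('http://example.com/'))   # b'cached body'
    # The entry lives under .webcache/<c1>/<c2>/<c3>/<c4>/<c5>/<digest>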
pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -0,0 +1,38 @@
from __future__ import division

from datetime import datetime


def total_seconds(td):
    """Python 2.6 compatibility"""
    if hasattr(td, 'total_seconds'):
        return td.total_seconds()

    ms = td.microseconds
    secs = (td.seconds + td.days * 24 * 3600)
    return (ms + secs * 10**6) / 10**6


class RedisCache(object):

    def __init__(self, conn):
        self.conn = conn

    def get(self, key):
        return self.conn.get(key)

    def set(self, key, value, expires=None):
        if not expires:
            self.conn.set(key, value)
        else:
            expires = expires - datetime.now()
            self.conn.setex(key, total_seconds(expires), value)

    def delete(self, key):
        self.conn.delete(key)

    def clear(self):
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)
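
A minimal sketch of wiring this backend up, assuming the third-party redis
package and a server on localhost. Note that expires is an absolute
datetime; the class converts it to a relative TTL for SETEX:

    from datetime import datetime, timedelta

    import redis
    from cachecontrol.caches.redis_cache import RedisCache

    rc = RedisCache(redis.Redis(host='localhost', port=6379))
    rc.set('key', b'value')
    # Expire the entry roughly an hour from now via SETEX.
    rc.set('key', b'value', expires=datetime.now() + timedelta(hours=1))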
pip/_vendor/cachecontrol/compat.py
@@ -0,0 +1,24 @@
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin


try:
    import email.utils
    parsedate_tz = email.utils.parsedate_tz
except ImportError:
    import email.Utils
    parsedate_tz = email.Utils.parsedate_tz


try:
    import cPickle as pickle
except ImportError:
    import pickle


# Handle the case where requests has been patched to not have urllib3
# bundled as part of its source.
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
pip/_vendor/cachecontrol/controller.py
@@ -0,0 +1,257 @@
"""
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time

from pip._vendor.requests.structures import CaseInsensitiveDict

from .cache import DictCache
from .compat import parsedate_tz
from .serialize import Serializer


URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])


class CacheController(object):
    """An interface to see if a request should be cached or not.
    """
    def __init__(self, cache=None, cache_etags=True, serializer=None):
        self.cache = cache or DictCache()
        self.cache_etags = cache_etags
        self.serializer = serializer or Serializer()

    def _urlnorm(self, uri):
        """Normalize the URL to create a safe key for the cache"""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
        authority = authority.lower()
        scheme = scheme.lower()
        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        scheme = scheme.lower()
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    def cache_url(self, uri):
        return self._urlnorm(uri)

    def parse_cache_control(self, headers):
        """
        Parse the cache control headers returning a dictionary with values
        for the different directives.
        """
        retval = {}

        cc_header = 'cache-control'
        if 'Cache-Control' in headers:
            cc_header = 'Cache-Control'

        if cc_header in headers:
            parts = headers[cc_header].split(',')
            parts_with_args = [
                tuple([x.strip().lower() for x in part.split("=", 1)])
                for part in parts if -1 != part.find("=")]
            parts_wo_args = [(name.strip().lower(), 1)
                             for name in parts if -1 == name.find("=")]
            retval = dict(parts_with_args + parts_wo_args)
        return retval

    def cached_request(self, request):
        cache_url = self.cache_url(request.url)
        cc = self.parse_cache_control(request.headers)

        # non-caching states
        no_cache = True if 'no-cache' in cc else False
        if 'max-age' in cc and cc['max-age'] == '0':
            no_cache = True

        # Bail out if no-cache was set
        if no_cache:
            return False

        # It is in the cache, so let's see if it is going to be
        # fresh enough
        resp = self.serializer.loads(request, self.cache.get(cache_url))

        # Check to see if we have a cached object
        if not resp:
            return False

        headers = CaseInsensitiveDict(resp.headers)

        now = time.time()
        date = calendar.timegm(
            parsedate_tz(headers['date'])
        )
        current_age = max(0, now - date)

        # TODO: There is an assumption that the result will be a
        # urllib3 response object. This may not be best since we
        # could probably avoid instantiating or constructing the
        # response until we know we need it.
        resp_cc = self.parse_cache_control(headers)

        # determine freshness
        freshness_lifetime = 0
        if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
            freshness_lifetime = int(resp_cc['max-age'])
        elif 'expires' in headers:
            expires = parsedate_tz(headers['expires'])
            if expires is not None:
                expire_time = calendar.timegm(expires) - date
                freshness_lifetime = max(0, expire_time)

        # determine if we are setting freshness limit in the req
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0

        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            # adjust our current age by our min fresh
            current_age += min_fresh

        # see how fresh we actually are
        fresh = (freshness_lifetime > current_age)

        if fresh:
            return resp

        # we're not fresh. If we don't have an ETag, clear it out
        if 'etag' not in headers:
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request):
        cache_url = self.cache_url(request.url)
        resp = self.serializer.loads(request, self.cache.get(cache_url))
        new_headers = {}

        if resp:
            headers = CaseInsensitiveDict(resp.headers)

            if 'etag' in headers:
                new_headers['If-None-Match'] = headers['ETag']

            if 'last-modified' in headers:
                new_headers['If-Modified-Since'] = headers['Last-Modified']

        return new_headers

    def cache_response(self, request, response, body=None):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        # handle byte range requests
        if response.status not in [200, 203]:
            return

        response_headers = CaseInsensitiveDict(response.headers)

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        cache_url = self.cache_url(request.url)

        # Delete it from the cache if we happen to have it stored there
        no_store = cc.get('no-store') or cc_req.get('no-store')
        if no_store and self.cache.get(cache_url):
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in response_headers:
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body=body),
            )

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in response_headers:
            # cache when there is a max-age > 0
            if cc and cc.get('max-age'):
                if int(cc['max-age']) > 0:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in response_headers:
                if response_headers['expires']:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        cache_url = self.cache_url(request.url)

        cached_response = self.serializer.loads(request, self.cache.get(cache_url))

        if not cached_response:
            # we didn't have a cached response
            return response

        # Let's update our headers with the headers from the new response:
        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
        #
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problematic due to
        # typical assumptions.
        excluded_headers = [
            "content-length",
        ]

        cached_response.headers.update(
            dict((k, v) for k, v in response.headers.items()
                 if k.lower() not in excluded_headers)
        )

        # we want a 200 b/c we have content via the cache
        cached_response.status = 200

        # update our cache
        self.cache.set(
            cache_url,
            self.serializer.dumps(request, cached_response),
        )

        return cached_response
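
To make the directive parsing concrete, a small sketch of what
parse_cache_control returns: valued directives keep their string form,
while bare directives map to 1 (assuming a standalone cachecontrol
install):

    from cachecontrol.controller import CacheController

    cc = CacheController().parse_cache_control(
        {'Cache-Control': 'max-age=3600, no-store'}
    )
    print(cc)  # {'max-age': '3600', 'no-store': 1}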
pip/_vendor/cachecontrol/filewrapper.py
@@ -0,0 +1,35 @@
from io import BytesIO

from .compat import is_fp_closed


class CallbackFileWrapper(object):
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.

    All attributes are proxied to the underlying file object.

    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.
    """

    def __init__(self, fp, callback):
        self.__buf = BytesIO()
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        return getattr(self.__fp, name)

    def read(self, amt=None):
        data = self.__fp.read(amt)
        self.__buf.write(data)

        # Is this the best way to figure out if the file has been completely
        # consumed?
        if is_fp_closed(self.__fp):
            self.__callback(self.__buf.getvalue())

        return data
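
A rough sketch of the tee-and-callback behaviour. FakeBody is a
hypothetical stand-in for a response stream, and this assumes is_fp_closed
falls back to checking a closed attribute on objects without an fp member:

    import io
    from cachecontrol.filewrapper import CallbackFileWrapper

    class FakeBody(io.BytesIO):
        closed = False  # shadow BytesIO.closed so we can flip it manually

    body = FakeBody(b'payload')
    wrapped = CallbackFileWrapper(body, lambda buf: print('cached:', buf))
    wrapped.read()       # tees everything read into the internal buffer
    body.closed = True
    wrapped.read()       # stream reports consumed -> callback fires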
pip/_vendor/cachecontrol/serialize.py
@@ -0,0 +1,96 @@
import io

from pip._vendor.requests.structures import CaseInsensitiveDict

from .compat import HTTPResponse, pickle


class Serializer(object):

    def dumps(self, request, response, body=None):
        response_headers = CaseInsensitiveDict(response.headers)

        if body is None:
            body = response.read(decode_content=False)
            response._fp = io.BytesIO(body)

        data = {
            "response": {
                "body": body,
                "headers": response.headers,
                "status": response.status,
                "version": response.version,
                "reason": response.reason,
                "strict": response.strict,
                "decode_content": response.decode_content,
            },
        }

        # Construct our vary headers
        data["vary"] = {}
        if "vary" in response_headers:
            varied_headers = response_headers['vary'].split(',')
            for header in varied_headers:
                header = header.strip()
                data["vary"][header] = request.headers.get(header, None)

        return b"cc=1," + pickle.dumps(data, pickle.HIGHEST_PROTOCOL)

    def loads(self, request, data):
        # Short circuit if we've been given an empty set of data
        if not data:
            return

        # Determine what version of the serializer the data was serialized
        # with
        try:
            ver, data = data.split(b",", 1)
        except ValueError:
            ver = b"cc=0"

        # Make sure that our "ver" is actually a version and isn't a false
        # positive from a , being in the data stream.
        if ver[:3] != b"cc=":
            data = ver + data
            ver = b"cc=0"

        # Get the version number out of the cc=N
        ver = ver.split(b"=", 1)[-1].decode("ascii")

        # Dispatch to the actual load method for the given version
        try:
            return getattr(self, "_loads_v{0}".format(ver))(request, data)
        except AttributeError:
            # This is a version we don't have a loads function for, so we'll
            # just treat it as a miss and return None
            return

    def _loads_v0(self, request, data):
        # The original legacy cache data. This doesn't contain enough
        # information to construct everything we need, so we'll treat this as
        # a miss.
        return

    def _loads_v1(self, request, data):
        try:
            cached = pickle.loads(data)
        except ValueError:
            return

        # Special case the '*' Vary value as it means we cannot actually
        # determine if the cached response is suitable for this request.
        if "*" in cached.get("vary", {}):
            return

        # Ensure that the Vary headers for the cached response match our
        # request
        for header, value in cached.get("vary", {}).items():
            if request.headers.get(header, None) != value:
                return

        body = io.BytesIO(cached["response"].pop("body"))
        return HTTPResponse(
            body=body,
            preload_content=False,
            **cached["response"]
        )
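
The serialized blob is just a "cc=N" version tag in front of a pickle, so
the dispatch in loads() can be sketched directly (the leading comma in the
tag is always the first comma in the stream, so the split is safe):

    import pickle

    blob = b"cc=1," + pickle.dumps({"response": {}}, pickle.HIGHEST_PROTOCOL)
    ver, payload = blob.split(b",", 1)
    print(ver)                      # b'cc=1'
    print(ver.split(b"=", 1)[-1])   # b'1' -> dispatched to _loads_v1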
pip/_vendor/cachecontrol/wrapper.py
@@ -0,0 +1,15 @@
from .adapter import CacheControlAdapter
from .cache import DictCache


def CacheControl(sess, cache=None, cache_etags=True, serializer=None):
    cache = cache or DictCache()
    adapter = CacheControlAdapter(
        cache,
        cache_etags=cache_etags,
        serializer=serializer,
    )
    sess.mount('http://', adapter)
    sess.mount('https://', adapter)

    return sess
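
With this wrapper, caching a session is a one-liner. A sketch assuming the
standalone cachecontrol distribution (inside pip the same function is
reached via pip._vendor.cachecontrol):

    import requests
    from cachecontrol import CacheControl

    sess = CacheControl(requests.Session())
    resp = sess.get('http://example.com/')   # fetched and cached
    resp = sess.get('http://example.com/')   # may be served from the cache
    print(resp.from_cache)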
pip/_vendor/lockfile/__init__.py
@@ -0,0 +1,317 @@
"""
lockfile.py - Platform-independent advisory file locks.

Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.

Usage:

>>> lock = LockFile('somefile')
>>> try:
...   lock.acquire()
... except AlreadyLocked:
...   print 'somefile', 'is locked already.'
... except LockFailed:
...   print 'somefile', 'can\\'t be locked.'
... else:
...   print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()

>>> lock = LockFile('somefile')
>>> print lock.is_locked()
False
>>> with lock:
...    print lock.is_locked()
True
>>> print lock.is_locked()
False

>>> lock = LockFile('somefile')
>>> # It is okay to lock twice from the same thread...
>>> with lock:
...     lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False

Exceptions:

    Error - base class for other exceptions
        LockError - base class for all locking exceptions
            AlreadyLocked - Another thread or process already holds the lock
            LockFailed - Lock failed for some other reason
        UnlockError - base class for all unlocking exceptions
            AlreadyUnlocked - File was not locked.
            NotMyLock - File was locked but not by the current thread/process
"""

from __future__ import absolute_import

import sys
import socket
import os
import threading
import time
import urllib
import warnings
import functools

# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
    threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
    threading.Thread.get_name = threading.Thread.getName

__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
           'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
           'LockBase', 'locked']

class Error(Exception):
    """
    Base class for other exceptions.

    >>> try:
    ...   raise Error
    ... except Exception:
    ...   pass
    """
    pass

class LockError(Error):
    """
    Base class for error arising from attempts to acquire the lock.

    >>> try:
    ...   raise LockError
    ... except Error:
    ...   pass
    """
    pass

class LockTimeout(LockError):
    """Raised when lock creation fails within a user-defined period of time.

    >>> try:
    ...   raise LockTimeout
    ... except LockError:
    ...   pass
    """
    pass

class AlreadyLocked(LockError):
    """Some other thread/process is locking the file.

    >>> try:
    ...   raise AlreadyLocked
    ... except LockError:
    ...   pass
    """
    pass

class LockFailed(LockError):
    """Lock file creation failed for some other reason.

    >>> try:
    ...   raise LockFailed
    ... except LockError:
    ...   pass
    """
    pass

class UnlockError(Error):
    """
    Base class for errors arising from attempts to release the lock.

    >>> try:
    ...   raise UnlockError
    ... except Error:
    ...   pass
    """
    pass

class NotLocked(UnlockError):
    """Raised when an attempt is made to unlock an unlocked file.

    >>> try:
    ...   raise NotLocked
    ... except UnlockError:
    ...   pass
    """
    pass

class NotMyLock(UnlockError):
    """Raised when an attempt is made to unlock a file someone else locked.

    >>> try:
    ...   raise NotMyLock
    ... except UnlockError:
    ...   pass
    """
    pass

class LockBase:
    """Base class for platform-specific lock classes."""
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = LockBase('somefile')
        >>> lock = LockBase('somefile', threaded=False)
        """
        self.path = path
        self.lock_file = os.path.abspath(path) + ".lock"
        self.hostname = socket.gethostname()
        self.pid = os.getpid()
        if threaded:
            t = threading.current_thread()
            # Thread objects in Python 2.4 and earlier do not have ident
            # attrs.  Work around that.
            ident = getattr(t, "ident", hash(t))
            self.tname = "-%x" % (ident & 0xffffffff)
        else:
            self.tname = ""
        dirname = os.path.dirname(self.lock_file)
        self.unique_name = os.path.join(dirname,
                                        "%s%s.%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))
        self.timeout = timeout

    def acquire(self, timeout=None):
        """
        Acquire the lock.

        * If timeout is omitted (or None), wait forever trying to lock the
          file.

        * If timeout > 0, try to acquire the lock for that many seconds.  If
          the lock period expires and the file is still locked, raise
          LockTimeout.

        * If timeout <= 0, raise AlreadyLocked immediately if the file is
          already locked.
        """
        raise NotImplementedError("implement in subclass")

    def release(self):
        """
        Release the lock.

        If the file is not locked, raise NotLocked.
        """
        raise NotImplementedError("implement in subclass")

    def is_locked(self):
        """
        Tell whether or not the file is locked.
        """
        raise NotImplementedError("implement in subclass")

    def i_am_locking(self):
        """
        Return True if this object is locking the file.
        """
        raise NotImplementedError("implement in subclass")

    def break_lock(self):
        """
        Remove a lock.  Useful if a locking thread failed to unlock.
        """
        raise NotImplementedError("implement in subclass")

    def __enter__(self):
        """
        Context manager support.
        """
        self.acquire()
        return self

    def __exit__(self, *_exc):
        """
        Context manager support.
        """
        self.release()

    def __repr__(self):
        return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
                                   self.path)

def _fl_helper(cls, mod, *args, **kwds):
    warnings.warn("Import from %s module instead of lockfile package" % mod,
                  DeprecationWarning, stacklevel=2)
    # This is a bit funky, but it's only for a while.  The way the unit tests
    # are constructed this function winds up as an unbound method, so it
    # actually takes three args, not two.  We want to toss out self.
    if not isinstance(args[0], str):
        # We are testing, avoid the first arg
        args = args[1:]
    if len(args) == 1 and not kwds:
        kwds["threaded"] = True
    return cls(*args, **kwds)

def LinkFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import LinkLockFile from the
    lockfile.linklockfile module.
    """
    from . import linklockfile
    return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
                      *args, **kwds)

def MkdirFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import MkdirLockFile from the
    lockfile.mkdirlockfile module.
    """
    from . import mkdirlockfile
    return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
                      *args, **kwds)

def SQLiteFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import SQLiteLockFile from the
    lockfile.sqlitelockfile module.
    """
    from . import sqlitelockfile
    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
                      *args, **kwds)

def locked(path, timeout=None):
    """Decorator which enables locks for decorated function.

    Arguments:
     - path: path for lockfile.
     - timeout (optional): Timeout for acquiring lock.

    Usage:
        @locked('/var/run/myname', timeout=0)
        def myname(...):
            ...
    """
    def decor(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(path, timeout=timeout)
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decor

if hasattr(os, "link"):
    from . import linklockfile as _llf
    LockFile = _llf.LinkLockFile
else:
    from . import mkdirlockfile as _mlf
    LockFile = _mlf.MkdirLockFile

FileLock = LockFile
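
A short sketch of the acquire/timeout semantics documented on LockBase
above. FileLock resolves to LinkLockFile or MkdirLockFile depending on the
platform:

    from lockfile import FileLock, LockTimeout

    lock = FileLock('somefile')
    try:
        lock.acquire(timeout=5)   # wait at most five seconds
    except LockTimeout:
        print('could not lock somefile within 5 seconds')
    else:
        try:
            print('got lock')
        finally:
            lock.release()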
pip/_vendor/lockfile/linklockfile.py
@@ -0,0 +1,73 @@
from __future__ import absolute_import

import time
import os

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)

class LinkLockFile(LockBase):
    """Lock access to a file using atomic property of link(2).

    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """

    def acquire(self, timeout=None):
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)

        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(timeout is not None and timeout/10 or 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)

    def is_locked(self):
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)

    def break_lock(self):
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
pip/_vendor/lockfile/mkdirlockfile.py
@@ -0,0 +1,83 @@
from __future__ import absolute_import, division

import time
import os
import sys
import errno

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)

class MkdirLockFile(LockBase):
    """Lock file by creating a directory."""
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # Lock file itself is a directory.  Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))

    def acquire(self, timeout=None):
        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)

        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                open(self.unique_name, "wb").close()
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)

    def is_locked(self):
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        return (self.is_locked() and
                os.path.exists(self.unique_name))

    def break_lock(self):
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
pip/_vendor/lockfile/pidlockfile.py
@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-

# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.

""" Lockfile behaviour implemented via Unix PID files.
"""

from __future__ import absolute_import

import os
import sys
import errno
import time

from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
               LockTimeout)


class PIDLockFile(LockBase):
    """ Lockfile implemented as a Unix PID file.

    The lock file is a normal file named by the attribute `path`.
    A lock's PID file contains a single line of text, containing
    the process ID (PID) of the process that acquired the lock.

    >>> lock = PIDLockFile('somefile')
    >>> lock = PIDLockFile('somefile')
    """

    def __init__(self, path, threaded=False, timeout=None):
        # pid lockfiles don't support threaded operation, so always force
        # False as the threaded arg.
        LockBase.__init__(self, path, False, timeout)
        dirname = os.path.dirname(self.lock_file)
        basename = os.path.split(self.path)[-1]
        self.unique_name = self.path

    def read_pid(self):
        """ Get the PID from the lock file.
        """
        return read_pid_from_pidfile(self.path)

    def is_locked(self):
        """ Test if the lock is currently held.

        The lock is held if the PID file for this lock exists.

        """
        return os.path.exists(self.path)

    def i_am_locking(self):
        """ Test if the lock is held by the current process.

        Returns ``True`` if the current process ID matches the
        number stored in the PID file.
        """
        return self.is_locked() and os.getpid() == self.read_pid()

    def acquire(self, timeout=None):
        """ Acquire the lock.

        Creates the PID file for this lock, or raises an error if
        the lock could not be acquired.
        """

        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            try:
                write_pid_to_pidfile(self.path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # The lock creation failed.  Maybe sleep a bit.
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(timeout is not None and timeout/10 or 0.1)
                else:
                    raise LockFailed("failed to create %s" % self.path)
            else:
                return

    def release(self):
        """ Release the lock.

        Removes the PID file to release the lock, or raises an
        error if the current process does not hold the lock.

        """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        remove_existing_pidfile(self.path)

    def break_lock(self):
        """ Break an existing lock.

        Removes the PID file if it already exists, otherwise does
        nothing.

        """
        remove_existing_pidfile(self.path)

def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file.  If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.

    """
    pid = None
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        pass
    else:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character.
        #
        #   Programs that read PID files should be somewhat flexible
        #   in what they accept; i.e., they should ignore extra
        #   whitespace, leading zeroes, absence of the trailing
        #   newline, or additional lines in the PID file.

        line = pidfile.readline().strip()
        try:
            pid = int(line)
        except ValueError:
            pass
        pidfile.close()

    return pid


def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

    Get the numeric process ID (“PID”) of the current process
    and write it to the named file as a line of text.

    """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    pidfile = os.fdopen(pidfile_fd, 'w')

    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character.  For
    #   example, if crond was process number 25, /var/run/crond.pid
    #   would contain three characters: two, five, and newline.

    pid = os.getpid()
    line = "%(pid)d\n" % vars()
    pidfile.write(line)
    pidfile.close()


def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

    Removing a PID file that doesn't already exist puts us in the
    desired state, so we ignore the condition if the file does not
    exist.

    """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            pass
        else:
            raise
pip/_vendor/lockfile/sqlitelockfile.py
@@ -0,0 +1,155 @@
from __future__ import absolute_import, division

import time
import os

try:
    unicode
except NameError:
    unicode = str

from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked

class SQLiteLockFile(LockBase):
    "Demonstrate SQL-based locking."

    testdb = None

    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = SQLiteLockFile('somefile')
        >>> lock = SQLiteLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        self.lock_file = unicode(self.lock_file)
        self.unique_name = unicode(self.unique_name)

        if SQLiteLockFile.testdb is None:
            import tempfile
            _fd, testdb = tempfile.mkstemp()
            os.close(_fd)
            os.unlink(testdb)
            del _fd, tempfile
            SQLiteLockFile.testdb = testdb

        import sqlite3
        self.connection = sqlite3.connect(SQLiteLockFile.testdb)

        c = self.connection.cursor()
        try:
            c.execute("create table locks"
                      "("
                      "   lock_file varchar(32),"
                      "   unique_name varchar(32)"
                      ")")
        except sqlite3.OperationalError:
            pass
        else:
            self.connection.commit()
            import atexit
            atexit.register(os.unlink, SQLiteLockFile.testdb)

    def acquire(self, timeout=None):
        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        if timeout is None:
            wait = 0.1
        elif timeout <= 0:
            wait = 0
        else:
            wait = timeout / 10

        cursor = self.connection.cursor()

        while True:
            if not self.is_locked():
                # Not locked.  Try to lock it.
                cursor.execute("insert into locks"
                               "  (lock_file, unique_name)"
                               "  values"
                               "  (?, ?)",
                               (self.lock_file, self.unique_name))
                self.connection.commit()

                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) > 1:
                    # Nope.  Someone else got there.  Remove our lock.
                    cursor.execute("delete from locks"
                                   "  where unique_name = ?",
                                   (self.unique_name,))
                    self.connection.commit()
                else:
                    # Yup.  We're done, so go home.
                    return
            else:
                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) == 1:
                    # We're the locker, so go home.
                    return

            # Maybe we should wait a bit longer.
            if timeout is not None and time.time() > end_time:
                if timeout > 0:
                    # No more waiting.
                    raise LockTimeout("Timeout waiting to acquire"
                                      " lock for %s" %
                                      self.path)
                else:
                    # Someone else has the lock and we are impatient..
                    raise AlreadyLocked("%s is already locked" % self.path)

            # Well, okay.  We'll give it a bit longer.
            time.sleep(wait)

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me (by %s)" %
                            (self.unique_name, self._who_is_locking()))
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where unique_name = ?",
                       (self.unique_name,))
        self.connection.commit()

    def _who_is_locking(self):
        cursor = self.connection.cursor()
        cursor.execute("select unique_name from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        return cursor.fetchone()[0]

    def is_locked(self):
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        rows = cursor.fetchall()
        return not not rows

    def i_am_locking(self):
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?"
                       "    and unique_name = ?",
                       (self.lock_file, self.unique_name))
        return not not cursor.fetchall()

    def break_lock(self):
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        self.connection.commit()
pip/_vendor/lockfile/symlinklockfile.py
@@ -0,0 +1,69 @@
from __future__ import absolute_import

import time
import os

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)

class SymlinkLockFile(LockBase):
    """Lock access to a file using symlink(2)."""

    def __init__(self, path, threaded=True, timeout=None):
        # super(SymlinkLockFile).__init(...)
        LockBase.__init__(self, path, threaded, timeout)
        # split it back!
        self.unique_name = os.path.split(self.unique_name)[1]

    def acquire(self, timeout=None):
        # Hopefully unnecessary for symlink.
        # try:
        #     open(self.unique_name, "wb").close()
        # except IOError:
        #     raise LockFailed("failed to create %s" % self.unique_name)
        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a symbolic link to it.
            try:
                os.symlink(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                if self.i_am_locking():
                    # Linked to our unique name.  Proceed.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(timeout / 10 if timeout is not None else 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.lock_file)

    def is_locked(self):
        return os.path.islink(self.lock_file)

    def i_am_locking(self):
        return os.path.islink(self.lock_file) and \
            os.readlink(self.lock_file) == self.unique_name

    def break_lock(self):
        if os.path.islink(self.lock_file):  # exists && link
            os.unlink(self.lock_file)
pip/_vendor/vendor.txt
@@ -3,3 +3,5 @@ html5lib==1.0b3
six==1.6.1
colorama==0.3.1
requests==2.2.1
CacheControl==0.10.1
lockfile==0.9.1