From 9e123ef4ad41c6a08260622be46e149130fa8851 Mon Sep 17 00:00:00 2001 From: Hugo Date: Sun, 26 Aug 2018 21:29:41 +0300 Subject: [PATCH 01/30] Re-enable nightly --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index aa98bbc..2067c9f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,8 +7,7 @@ python: - 3.5 - 3.6 - pypy - # Disabled because PyYAML is failing to compile against it - # - nightly + - nightly matrix: include: - python: 3.7 From 72ff3c233f3f2d4ab1824175add3f0157de02417 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Mon, 27 Aug 2018 11:50:05 +0300 Subject: [PATCH 02/30] Optimize `_scan()`. In case that a regular expression is specified, it is not needed to copy all the keys if some of them do not match the regular expression. If a regular expression is not provided, there is no need to iterate over the slice. Instead, we simply copy the entire slice. This is useful in cases where we're dealing with a large number of keys. 
--- fakeredis.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index 9c6b68d..5ce669e 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -13,7 +13,7 @@ import types import re import functools -from itertools import count +from itertools import count, islice import redis from redis.exceptions import ResponseError @@ -2046,14 +2046,15 @@ def _scan(self, keys, cursor, match, count): data = sorted(keys) result_cursor = cursor + count result_data = [] - # subset = + if match is not None: regex = _compile_pattern(match) + for val in islice(data, cursor, result_cursor): + if regex.match(to_bytes(val)): + result_data.append(val) else: - regex = None - for val in data[cursor:result_cursor]: - if not regex or regex.match(to_bytes(val)): - result_data.append(val) + result_data = data[cursor:result_cursor] + if result_cursor >= len(data): result_cursor = 0 return result_cursor, result_data From 9943e742a1e4676688bc43055cec02a9141bfc06 Mon Sep 17 00:00:00 2001 From: Alex Eshoo Date: Wed, 5 Sep 2018 09:09:18 -0400 Subject: [PATCH 03/30] added pubsub.run_in_thread as it is implemented in redis-py --- fakeredis.py | 41 ++++++++++++++++++++++++++++++++++++++++- test_fakeredis.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/fakeredis.py b/fakeredis.py index 5ce669e..bfd6309 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -16,7 +16,7 @@ from itertools import count, islice import redis -from redis.exceptions import ResponseError +from redis.exceptions import ResponseError, PubSubError import redis.client try: @@ -2454,3 +2454,42 @@ def handle_message(self, message, ignore_subscribe_messages=False): return None return message + + def run_in_thread(self, sleep_time=0, daemon=False): + for channel, handler in iteritems(self.channels): + if handler is None: + raise PubSubError("Channel: '%s' has no handler registered" % (channel,)) + for pattern, handler in 
iteritems(self.patterns): + if handler is None: + raise PubSubError("Pattern: '%s' has no handler registered" % (channel,)) + + thread = FakePubSubWorkerThread(self, sleep_time, daemon=daemon) + thread.start() + return thread + +class FakePubSubWorkerThread(threading.Thread): + def __init__(self, pubsub, sleep_time, daemon=False): + super(FakePubSubWorkerThread, self).__init__() + self.daemon = daemon + self.pubsub = pubsub + self.sleep_time = sleep_time + self._running = False + + def run(self): + if self._running: + return + self._running = True + pubsub = self.pubsub + sleep_time = self.sleep_time + while pubsub.subscribed: + pubsub.get_message(ignore_subscribe_messages=True, + timeout=sleep_time) + pubsub.close() + self._running = False + + def stop(self): + # stopping simply unsubscribes from all channels and patterns. + # the unsubscribe responses that are generated will short circuit + # the loop in run(), calling pubsub.close() to clean up the connection + self.pubsub.unsubscribe() + self.pubsub.punsubscribe() diff --git a/test_fakeredis.py b/test_fakeredis.py index 715559c..b26c6ef 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -3078,6 +3078,36 @@ def _listen(pubsub, q): received = q.get() self.assertEqual(received['data'], msg) + @attr('slow') + def test_pubsub_run_in_thread(self): + q = Queue() + + def _queue_msg(msg, q): + q.put(msg) + + pubsub = self.redis.pubsub() + pubsub.subscribe(**{"channel": lambda e: _queue_msg(e, q)}) + pubsub_thread = pubsub.run_in_thread() + + msg = b"Hello World" + self.redis.publish("channel", msg) + + sleep(1) + retrieved = q.get() + self.assertEqual(retrieved["data"], msg) + + pubsub_thread.stop() + pubsub_thread.join() + self.assertTrue(not pubsub_thread.is_alive()) + + pubsub.subscribe(**{"channel": None}) + with self.assertRaises(redis.exceptions.PubSubError): + pubsub_thread = pubsub.run_in_thread() + + pubsub.psubscribe(**{"channel": None}) + with self.assertRaises(redis.exceptions.PubSubError): + 
pubsub_thread = pubsub.run_in_thread() + def test_pfadd(self): key = "hll-pfadd" self.assertEqual( From c91248b86de81d8169567b1ff9d021d030d0f63b Mon Sep 17 00:00:00 2001 From: Alex Eshoo Date: Wed, 5 Sep 2018 09:28:32 -0400 Subject: [PATCH 04/30] added a newline. --- fakeredis.py | 1 + 1 file changed, 1 insertion(+) diff --git a/fakeredis.py b/fakeredis.py index bfd6309..c2f0498 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -2467,6 +2467,7 @@ def run_in_thread(self, sleep_time=0, daemon=False): thread.start() return thread + class FakePubSubWorkerThread(threading.Thread): def __init__(self, pubsub, sleep_time, daemon=False): super(FakePubSubWorkerThread, self).__init__() From eb2eb27ce27e935f43cc5ec8435573dcc7af13c1 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Mon, 10 Sep 2018 11:21:18 +0200 Subject: [PATCH 05/30] Prepare for 0.13.1 release --- README.rst | 6 ++++++ fakeredis.py | 2 +- setup.py | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 23b0799..38deca1 100644 --- a/README.rst +++ b/README.rst @@ -294,6 +294,12 @@ they have all been tagged as 'slow' so you can skip them by running:: Revision history ================ +0.13.1 +------ +- `#208 `_ eval's KEYS and ARGV are now lua tables +- `#209 `_ Redis operation that returns dict now converted to Lua table when called inside eval operation +- `#212 `_ Optimize ``_scan()`` + 0.13.0.1 -------- - Fix a typo in the Trove classifiers diff --git a/fakeredis.py b/fakeredis.py index 5ce669e..6b05902 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -29,7 +29,7 @@ PY2 = sys.version_info[0] == 2 -__version__ = '0.13.0.1' +__version__ = '0.13.1' if PY2: diff --git a/setup.py b/setup.py index 1335777..e0051ff 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='fakeredis', - version='0.13.0.1', + version='0.13.1', description="Fake implementation of redis API for testing purposes.", long_description=open(os.path.join(os.path.dirname(__file__), 
'README.rst')).read(), From 310cfb1b1d4eeae80ccd74fe823fc46deceda08d Mon Sep 17 00:00:00 2001 From: da2018 Date: Tue, 11 Sep 2018 16:22:00 +0800 Subject: [PATCH 06/30] keep pace with redis-py for zrevrange method --- fakeredis.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index 6b05902..921e3ff 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -1825,19 +1825,19 @@ def zlexcount(self, name, min, max): found += 1 return found - def zrevrange(self, name, start, num, withscores=False, score_cast_func=float): + def zrevrange(self, name, start, end, withscores=False, score_cast_func=float): """ Return a range of values from sorted set ``name`` between - ``start`` and ``num`` sorted in descending order. + ``start`` and ``end`` sorted in descending order. - ``start`` and ``num`` can be negative, indicating the end of the range. + ``start`` and ``end`` can be negative, indicating the end of the range. ``withscores`` indicates to return the scores along with the values The return type is a list of (value, score) pairs ``score_cast_func`` a callable used to cast the score return value """ - return self.zrange(name, start, num, True, withscores, score_cast_func) + return self.zrange(name, start, end, True, withscores, score_cast_func) def zrevrangebyscore(self, name, max, min, start=None, num=None, withscores=False, score_cast_func=float): From 90b6844ec08c3dd7466f2217891685fba6f6b64e Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Wed, 19 Sep 2018 11:35:01 +0200 Subject: [PATCH 07/30] Added locking on commands This is not yet totally thread safe because I haven't investigated `lock`, `pipeline` and `transaction`. But the majority of commands are now thread safe. The blocking commands like blpop should still be improved to use a condition variable. 
--- fakeredis.py | 118 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/fakeredis.py b/fakeredis.py index 6b05902..cf28bb4 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -343,6 +343,14 @@ def func_wrapper(*args, **kwargs): return func_wrapper +def _locked(func): + @functools.wraps(func) + def func_wrapper(self, *args, **kwargs): + with self._lock: + return func(self, *args, **kwargs) + return func_wrapper + + class FakeStrictRedis(object): @classmethod def from_url(cls, url, db=None, **kwargs): @@ -362,6 +370,7 @@ def __init__(self, db=0, charset='utf-8', errors='strict', self._dbs = {} if db not in self._dbs: self._dbs[db] = _ExpiringDict() + self._lock = threading.RLock() self._db = self._dbs[db] self._db_num = db self._encoding = charset @@ -375,11 +384,13 @@ def __init__(self, db=0, charset='utf-8', errors='strict', _patch_responses(self, _make_decode_func) @_lua_reply(_lua_bool_ok) + @_locked def flushdb(self): self._db.clear() return True @_lua_reply(_lua_bool_ok) + @_locked def flushall(self): for db in self._dbs.values(): db.clear() @@ -410,11 +421,13 @@ def _setdefault_string(self, name): return value # Basic key commands + @_locked def append(self, key, value): self._setdefault_string(key) self._db[key] += to_bytes(value) return len(self._db[key]) + @_locked def bitcount(self, name, start=0, end=-1): if end == -1: end = None @@ -426,6 +439,7 @@ def bitcount(self, name, start=0, end=-1): except KeyError: return 0 + @_locked def decr(self, name, amount=1): try: value = int(self._get_string(name, b'0')) - amount @@ -435,13 +449,16 @@ def decr(self, name, amount=1): "range.") return value + @_locked def exists(self, name): return name in self._db __contains__ = exists + @_locked def expire(self, name, time): return self._expire(name, time) + @_locked def pexpire(self, name, millis): return self._expire(name, millis, 1000) @@ -458,9 +475,11 @@ def _expire(self, name, time, multiplier=1): else: return False + 
@_locked def expireat(self, name, when): return self._expireat(name, when) + @_locked def pexpireat(self, name, when): return self._expireat(name, when, 1000) @@ -473,22 +492,26 @@ def _expireat(self, name, when, multiplier=1): else: return False + @_locked def echo(self, value): if isinstance(value, text_type): return value.encode('utf-8') return value + @_locked def get(self, name): value = self._get_string(name, None) if value is not None: return to_bytes(value) + @_locked def __getitem__(self, name): value = self.get(name) if value is not None: return value raise KeyError(name) + @_locked def getbit(self, name, offset): """Returns a boolean indicating the value of ``offset`` in ``name``""" val = self._get_string(name) @@ -501,6 +524,7 @@ def getbit(self, name, offset): return 0 return 1 if (1 << actual_bitoffset) & actual_val else 0 + @_locked def getset(self, name, value): """ Set the value at key ``name`` to ``value`` if key doesn't exist @@ -510,6 +534,7 @@ def getset(self, name, value): self._db[name] = to_bytes(value) return val + @_locked def incr(self, name, amount=1): """ Increments the value of ``key`` by ``amount``. 
If no key exists, @@ -526,12 +551,14 @@ def incr(self, name, amount=1): "range.") return value + @_locked def incrby(self, name, amount=1): """ Alias for command ``incr`` """ return self.incr(name, amount) + @_locked def incrbyfloat(self, name, amount=1.0): try: value = float(self._get_string(name, b'0')) + amount @@ -540,11 +567,13 @@ def incrbyfloat(self, name, amount=1.0): raise redis.ResponseError("value is not a valid float.") return value + @_locked def keys(self, pattern=None): if pattern is not None: regex = _compile_pattern(pattern) return [key for key in self._db if pattern is None or regex.match(key)] + @_locked def mget(self, keys, *args): all_keys = self._list_or_args(keys, args) found = [] @@ -560,6 +589,7 @@ def mget(self, keys, *args): return found @_lua_reply(_lua_bool_ok) + @_locked def mset(self, *args, **kwargs): if args: if len(args) != 1 or not isinstance(args[0], dict): @@ -570,6 +600,7 @@ def mset(self, *args, **kwargs): self.set(key, val) return True + @_locked def msetnx(self, mapping): """ Sets each key in the ``mapping`` dict to its corresponding value if @@ -581,6 +612,7 @@ def msetnx(self, mapping): return True return False + @_locked def persist(self, name): self._db.persist(name) @@ -588,6 +620,7 @@ def ping(self): return True @_lua_reply(_lua_bool_ok) + @_locked def rename(self, src, dst): try: value = self._db[src] @@ -597,12 +630,14 @@ def rename(self, src, dst): del self._db[src] return True + @_locked def renamenx(self, src, dst): if dst in self._db: return False else: return self.rename(src, dst) + @_locked def set(self, name, value, ex=None, px=None, nx=False, xx=False): if (not nx and not xx) or (nx and self._db.get(name, None) is None) \ or (xx and not self._db.get(name, None) is None): @@ -631,6 +666,7 @@ def set(self, name, value, ex=None, px=None, nx=False, xx=False): __setitem__ = set + @_locked def setbit(self, name, offset, value): val = self._get_string(name, b'\x00') byte = offset // 8 @@ -652,6 +688,7 @@ def 
setbit(self, name, offset, value): self._db.setx(name, bytes(reconstructed)) return bool(old_value) + @_locked def setex(self, name, time, value): if isinstance(time, timedelta): time = int(timedelta_total_seconds(time)) @@ -660,6 +697,7 @@ def setex(self, name, time, value): 'value is not an integer or out of range') return self.set(name, value, ex=time) + @_locked def psetex(self, name, time_ms, value): if isinstance(time_ms, timedelta): time_ms = int(timedelta_total_seconds(time_ms) * 1000) @@ -667,6 +705,7 @@ def psetex(self, name, time_ms, value): raise ResponseError("invalid expire time in SETEX") return self.set(name, value, px=time_ms) + @_locked def setnx(self, name, value): result = self.set(name, value, nx=True) # Real Redis returns False from setnx, but None from set(nx=...) @@ -674,6 +713,7 @@ def setnx(self, name, value): return False return result + @_locked def setrange(self, name, offset, value): val = self._get_string(name, b"") if len(val) < offset: @@ -682,9 +722,11 @@ def setrange(self, name, offset, value): self._db.setx(name, val) return len(val) + @_locked def strlen(self, name): return len(self._get_string(name)) + @_locked def substr(self, name, start, end=-1): if end == -1: end = None @@ -698,9 +740,11 @@ def substr(self, name, start, end=-1): # according to the docs. 
getrange = substr + @_locked def ttl(self, name): return self._ttl(name) + @_locked def pttl(self, name): return self._ttl(name, 1000) @@ -720,6 +764,7 @@ def _ttl(self, name, multiplier=1): (exp_time - now).seconds + (exp_time - now).microseconds / 1E6) * multiplier)) + @_locked def type(self, name): key = self._db.get(name) if hasattr(key.__class__, 'redis_type'): @@ -742,6 +787,7 @@ def watch(self, *names): def unwatch(self): pass + @_locked def delete(self, *names): deleted = 0 for name in names: @@ -752,6 +798,7 @@ def delete(self, *names): continue return deleted + @_locked def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None): """Sort and return the list, set or sorted set at ``name``. @@ -802,6 +849,7 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, except KeyError: return [] + @_locked def eval(self, script, numkeys, *keys_and_args): from lupa import LuaRuntime, LuaError @@ -1041,11 +1089,13 @@ def _setdefault_list(self, name): raise redis.ResponseError(_WRONGTYPE_MSG) return value + @_locked def lpush(self, name, *values): self._setdefault_list(name)[0:0] = list(reversed( [to_bytes(x) for x in values])) return len(self._db[name]) + @_locked def lrange(self, name, start, end): if end == -1: end = None @@ -1053,9 +1103,11 @@ def lrange(self, name, start, end): end += 1 return self._get_list(name)[start:end] + @_locked def llen(self, name): return len(self._get_list(name)) + @_locked @_remove_empty def lrem(self, name, count, value): value = to_bytes(value) @@ -1076,10 +1128,12 @@ def lrem(self, name, count, value): del a_list[index] return len(indices_to_remove) + @_locked def rpush(self, name, *values): self._setdefault_list(name).extend([to_bytes(x) for x in values]) return len(self._db[name]) + @_locked @_remove_empty def lpop(self, name): try: @@ -1088,6 +1142,7 @@ def lpop(self, name): return None @_lua_reply(_lua_bool_ok) + @_locked def lset(self, name, index, value): try: lst = 
self._get_list_or_none(name) @@ -1098,10 +1153,12 @@ def lset(self, name, index, value): raise redis.ResponseError("index out of range") return True + @_locked def rpushx(self, name, value): self._get_list(name).append(to_bytes(value)) @_lua_reply(_lua_bool_ok) + @_locked def ltrim(self, name, start, end): val = self._get_list_or_none(name) if val is not None: @@ -1112,15 +1169,18 @@ def ltrim(self, name, start, end): self._db.setx(name, val[start:end]) return True + @_locked def lindex(self, name, index): try: return self._get_list(name)[index] except IndexError: return None + @_locked def lpushx(self, name, value): self._get_list(name).insert(0, to_bytes(value)) + @_locked @_remove_empty def rpop(self, name): try: @@ -1128,6 +1188,7 @@ def rpop(self, name): except IndexError: return None + @_locked def linsert(self, name, where, refvalue, value): if where.lower() not in ('before', 'after'): raise redis.ResponseError('syntax error') @@ -1145,6 +1206,7 @@ def linsert(self, name, where, refvalue, value): lst.insert(index, to_bytes(value)) return len(lst) + @_locked def rpoplpush(self, src, dst): # _get_list instead of _setdefault_list at this point because we # don't want to create the list if nothing gets popped. 
@@ -1156,6 +1218,7 @@ def rpoplpush(self, src, dst): self._db.setx(dst, dst_list) return el + @_locked def blpop(self, keys, timeout=0): # This has to be a best effort approximation which follows # these rules: @@ -1174,6 +1237,7 @@ def blpop(self, keys, timeout=0): self._remove_if_empty(key) return ret + @_locked def brpop(self, keys, timeout=0): if isinstance(keys, string_types): keys = [to_bytes(keys)] @@ -1186,6 +1250,7 @@ def brpop(self, keys, timeout=0): self._remove_if_empty(key) return ret + @_locked def brpoplpush(self, src, dst, timeout=0): return self.rpoplpush(src, dst) @@ -1201,6 +1266,7 @@ def _setdefault_hash(self, name): raise redis.ResponseError(_WRONGTYPE_MSG) return value + @_locked @_remove_empty def hdel(self, name, *keys): h = self._get_hash(name) @@ -1211,6 +1277,7 @@ def hdel(self, name, *keys): rem += 1 return rem + @_locked def hexists(self, name, key): "Returns a boolean indicating if ``key`` exists within hash ``name``" if self._get_hash(name).get(key) is None: @@ -1218,26 +1285,31 @@ def hexists(self, name, key): else: return 1 + @_locked def hget(self, name, key): "Return the value of ``key`` within the hash ``name``" return self._get_hash(name).get(key) + @_locked def hstrlen(self, name, key): "Returns the string length of the value associated with field in the hash stored at key" return len(self._get_hash(name).get(key, "")) + @_locked def hgetall(self, name): "Return a Python dict of the hash's name/value pairs" all_items = dict() all_items.update(self._get_hash(name)) return all_items + @_locked def hincrby(self, name, key, amount=1): "Increment the value of ``key`` in hash ``name`` by ``amount``" new = int(self._setdefault_hash(name).get(key, b'0')) + amount self._db[name][key] = to_bytes(new) return new + @_locked def hincrbyfloat(self, name, key, amount=1.0): """Increment the value of key in hash name by floating amount""" try: @@ -1252,14 +1324,17 @@ def hincrbyfloat(self, name, key, amount=1.0): self._db[name][key] = 
to_bytes(new) return new + @_locked def hkeys(self, name): "Return the list of keys within hash ``name``" return list(self._get_hash(name)) + @_locked def hlen(self, name): "Return the number of elements in hash ``name``" return len(self._get_hash(name)) + @_locked def hset(self, name, key, value): """ Set ``key`` to ``value`` within hash ``name`` @@ -1269,6 +1344,7 @@ def hset(self, name, key, value): self._setdefault_hash(name)[key] = to_bytes(value) return 1 if key_is_new else 0 + @_locked def hsetnx(self, name, key, value): """ Set ``key`` to ``value`` within hash ``name`` if ``key`` does not @@ -1279,6 +1355,7 @@ def hsetnx(self, name, key, value): self._setdefault_hash(name)[key] = to_bytes(value) return True + @_locked def hmset(self, name, mapping): """ Sets each key in the ``mapping`` dict to its corresponding value @@ -1292,12 +1369,14 @@ def hmset(self, name, mapping): self._setdefault_hash(name).update(new_mapping) return True + @_locked def hmget(self, name, keys, *args): "Returns a list of values ordered identically to ``keys``" h = self._get_hash(name) all_keys = self._list_or_args(keys, args) return [h.get(k) for k in all_keys] + @_locked def hvals(self, name): "Return the list of values within hash ``name``" return list(self._get_hash(name).values()) @@ -1314,6 +1393,7 @@ def _setdefault_set(self, name): raise redis.ResponseError(_WRONGTYPE_MSG) return value + @_locked def sadd(self, name, *values): "Add ``value`` to set ``name``" a_set = self._setdefault_set(name) @@ -1321,10 +1401,12 @@ def sadd(self, name, *values): a_set |= set(to_bytes(x) for x in values) return len(a_set) - card + @_locked def scard(self, name): "Return the number of elements in set ``name``" return len(self._get_set(name)) + @_locked def sdiff(self, keys, *args): "Return the difference of sets specified by ``keys``" all_keys = (to_bytes(x) for x in self._list_or_args(keys, args)) @@ -1333,6 +1415,7 @@ def sdiff(self, keys, *args): diff -= self._get_set(key) return diff + 
@_locked @_remove_empty def sdiffstore(self, dest, keys, *args): """ @@ -1343,6 +1426,7 @@ def sdiffstore(self, dest, keys, *args): self._db[dest] = set(to_bytes(x) for x in diff) return len(diff) + @_locked def sinter(self, keys, *args): "Return the intersection of sets specified by ``keys``" all_keys = (to_bytes(x) for x in self._list_or_args(keys, args)) @@ -1351,6 +1435,7 @@ def sinter(self, keys, *args): intersect.intersection_update(self._get_set(key)) return intersect + @_locked @_remove_empty def sinterstore(self, dest, keys, *args): """ @@ -1361,14 +1446,17 @@ def sinterstore(self, dest, keys, *args): self._db[dest] = set(to_bytes(x) for x in intersect) return len(intersect) + @_locked def sismember(self, name, value): "Return a boolean indicating if ``value`` is a member of set ``name``" return to_bytes(value) in self._get_set(name) + @_locked def smembers(self, name): "Return all members of the set ``name``" return self._get_set(name).copy() + @_locked @_remove_empty def smove(self, src, dst, value): value = to_bytes(value) @@ -1381,6 +1469,7 @@ def smove(self, src, dst, value): except KeyError: return False + @_locked @_remove_empty def spop(self, name): "Remove and return a random member of set ``name``" @@ -1389,6 +1478,7 @@ def spop(self, name): except KeyError: return None + @_locked def srandmember(self, name, number=None): """ If ``number`` is None, returns a random member of set ``name``. 
@@ -1418,6 +1508,7 @@ def srandmember(self, name, number=None): in sorted(random.sample(range(len(members)), number)) ] + @_locked @_remove_empty def srem(self, name, *values): "Remove ``value`` from set ``name``" @@ -1426,6 +1517,7 @@ def srem(self, name, *values): a_set -= set(to_bytes(x) for x in values) return card - len(a_set) + @_locked def sunion(self, keys, *args): "Return the union of sets specifiued by ``keys``" all_keys = (to_bytes(x) for x in self._list_or_args(keys, args)) @@ -1434,6 +1526,7 @@ def sunion(self, keys, *args): union.update(self._get_set(key)) return union + @_locked def sunionstore(self, dest, keys, *args): """ Store the union of sets specified by ``keys`` into a new @@ -1544,6 +1637,7 @@ def _get_lexcomp_and_str(self, value): return comparator, actual_value + @_locked def zadd(self, name, *args, **kwargs): """ Set any number of score, element-name pairs to the key ``name``. Pairs @@ -1574,10 +1668,12 @@ def zadd(self, name, *args, **kwargs): raise redis.ResponseError("value is not a valid float") return len(zset) - old_len + @_locked def zcard(self, name): "Return the number of elements in the sorted set ``name``" return len(self._get_zset(name)) + @_locked def zcount(self, name, min, max): found = 0 filter_func = self._get_zelement_range_filter_func(min, max) @@ -1586,6 +1682,7 @@ def zcount(self, name, min, max): found += 1 return found + @_locked def zincrby(self, name, value, amount=1): "Increment the score of ``value`` in sorted set ``name`` by ``amount``" d = self._setdefault_zset(name) @@ -1593,6 +1690,7 @@ def zincrby(self, name, value, amount=1): d[value] = score return score + @_locked @_remove_empty def zinterstore(self, dest, keys, aggregate=None): """ @@ -1624,6 +1722,7 @@ def _apply_score_cast_func(self, items, all_items, withscores, score_cast_func): else: return [(k, score_cast_func(to_bytes(all_items[k]))) for k in items] + @_locked def zrange(self, name, start, end, desc=False, withscores=False, score_cast_func=float): 
""" Return a range of values from sorted set ``name`` between @@ -1657,6 +1756,7 @@ def _get_zelements_in_order(self, all_items, reverse=False): in_order = sorted(by_keyname, key=lambda x: x[1], reverse=reverse) return [el[0] for el in in_order] + @_locked def zrangebyscore(self, name, min, max, start=None, num=None, withscores=False, score_cast_func=float): """ @@ -1690,6 +1790,7 @@ def _zrangebyscore(self, name, min, max, start, num, withscores, score_cast_func matches = matches[start:start + num] return self._apply_score_cast_func(matches, all_items, withscores, score_cast_func) + @_locked def zrangebylex(self, name, min, max, start=None, num=None): """ @@ -1727,6 +1828,7 @@ def _zrangebylex(self, name, min, max, start, num, reverse): matches = matches[start:start + num] return matches + @_locked def zrank(self, name, value): """ Returns a 0-based value indicating the rank of ``value`` in sorted set @@ -1739,6 +1841,7 @@ def zrank(self, name, value): except ValueError: return None + @_locked @_remove_empty def zrem(self, name, *values): "Remove member ``value`` from sorted set ``name``" @@ -1750,6 +1853,7 @@ def zrem(self, name, *values): rem += 1 return rem + @_locked @_remove_empty def zremrangebyrank(self, name, min, max): """ @@ -1770,6 +1874,7 @@ def zremrangebyrank(self, name, min, max): num_deleted += 1 return num_deleted + @_locked @_remove_empty def zremrangebyscore(self, name, min, max): """ @@ -1785,6 +1890,7 @@ def zremrangebyscore(self, name, min, max): removed += 1 return removed + @_locked @_remove_empty def zremrangebylex(self, name, min, max): """ @@ -1806,6 +1912,7 @@ def zremrangebylex(self, name, min, max): removed += 1 return removed + @_locked def zlexcount(self, name, min, max): """ Returns a count of elements in the sorted set ``name`` @@ -1825,6 +1932,7 @@ def zlexcount(self, name, min, max): found += 1 return found + @_locked def zrevrange(self, name, start, num, withscores=False, score_cast_func=float): """ Return a range of values 
from sorted set ``name`` between @@ -1839,6 +1947,7 @@ def zrevrange(self, name, start, num, withscores=False, score_cast_func=float): """ return self.zrange(name, start, num, True, withscores, score_cast_func) + @_locked def zrevrangebyscore(self, name, max, min, start=None, num=None, withscores=False, score_cast_func=float): """ @@ -1856,6 +1965,7 @@ def zrevrangebyscore(self, name, max, min, start=None, num=None, return self._zrangebyscore(name, min, max, start, num, withscores, score_cast_func, reverse=True) + @_locked def zrevrangebylex(self, name, max, min, start=None, num=None): """ @@ -1875,6 +1985,7 @@ def zrevrangebylex(self, name, max, min, return self._zrangebylex(name, min, max, start, num, reverse=True) + @_locked def zrevrank(self, name, value): """ Returns a 0-based value indicating the descending rank of @@ -1885,6 +1996,7 @@ def zrevrank(self, name, value): if zrank is not None: return num_items - self.zrank(name, value) - 1 + @_locked def zscore(self, name, value): "Return the score of element ``value`` in sorted set ``name``" all_items = self._get_zset(name) @@ -1893,6 +2005,7 @@ def zscore(self, name, value): except KeyError: return None + @_locked def zunionstore(self, dest, keys, aggregate=None): """ Union multiple sorted sets specified by ``keys`` into @@ -1984,6 +2097,7 @@ def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, lock_class=None, thread_local=True): return _Lock(self, name, timeout) + @_locked def pubsub(self, ignore_subscribe_messages=False): """ Returns a new FakePubSub instance @@ -1994,6 +2108,7 @@ def pubsub(self, ignore_subscribe_messages=False): return ps + @_locked def publish(self, channel, message): """ Loops through all available pubsub objects and publishes the @@ -2059,12 +2174,15 @@ def _scan(self, keys, cursor, match, count): result_cursor = 0 return result_cursor, result_data + @_locked def scan(self, cursor=0, match=None, count=None): return self._scan(self.keys(), int(cursor), match, count or 
10) + @_locked def sscan(self, name, cursor=0, match=None, count=None): return self._scan(self.smembers(name), int(cursor), match, count or 10) + @_locked def hscan(self, name, cursor=0, match=None, count=None): cursor, keys = self._scan(self.hkeys(name), int(cursor), match, count or 10) results = {} From 6b9c43ddafab1e4596ab4e03fb0107efd12075bb Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 20 Sep 2018 11:43:28 +0200 Subject: [PATCH 08/30] Make blpop, brpop, brpoplpush work when blocking It now works as intended to have one thread blocked in one of these functions and then have another thread push an item to unblock the first. --- fakeredis.py | 74 +++++++++++++++++++++++++++++------------------ test_fakeredis.py | 36 +++++++++++++++++++++++ 2 files changed, 82 insertions(+), 28 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index cf28bb4..7ffbb0e 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -346,8 +346,12 @@ def func_wrapper(*args, **kwargs): def _locked(func): @functools.wraps(func) def func_wrapper(self, *args, **kwargs): - with self._lock: - return func(self, *args, **kwargs) + with self._condition: + ret = func(self, *args, **kwargs) + # This is overkill as func might not even have modified the DB. + # But fakeredis isn't intended to be high-performance. + self._condition.notify_all() + return ret return func_wrapper @@ -370,7 +374,7 @@ def __init__(self, db=0, charset='utf-8', errors='strict', self._dbs = {} if db not in self._dbs: self._dbs[db] = _ExpiringDict() - self._lock = threading.RLock() + self._condition = threading.Condition() self._db = self._dbs[db] self._db_num = db self._encoding = charset @@ -1218,41 +1222,55 @@ def rpoplpush(self, src, dst): self._db.setx(dst, dst_list) return el - @_locked - def blpop(self, keys, timeout=0): - # This has to be a best effort approximation which follows - # these rules: - # 1) For each of those keys see if there's something we can - # pop from. 
- # 2) If this is not the case then simulate a timeout. - # This means that there's not really any blocking behavior here. - if isinstance(keys, string_types): - keys = [to_bytes(keys)] + def _blocking(self, timeout, func): + if timeout is None: + timeout = 0 else: - keys = [to_bytes(k) for k in keys] - for key in keys: - lst = self._get_list(key) - if lst: - ret = (key, lst.pop(0)) - self._remove_if_empty(key) + expire = datetime.now() + timedelta(seconds=timeout) + while True: + ret = func() + if ret is not None: return ret + if timeout == 0: + self._condition.wait() + else: + wait_for = (expire - datetime.now()).total_seconds() + if wait_for <= 0: + break + if not self._condition.wait(wait_for): + break + # Timed out + return None - @_locked - def brpop(self, keys, timeout=0): + def _bpop(self, keys, timeout, pop): + """Implements blpop and brpop""" if isinstance(keys, string_types): keys = [to_bytes(keys)] else: keys = [to_bytes(k) for k in keys] - for key in keys: - lst = self._get_list(key) - if lst: - ret = (key, lst.pop()) - self._remove_if_empty(key) - return ret + + def try_pop(): + for key in keys: + lst = self._get_list(key) + if lst: + ret = (key, pop(lst)) + self._remove_if_empty(key) + return ret + return None + + return self._blocking(timeout, try_pop) + + @_locked + def blpop(self, keys, timeout=0): + return self._bpop(keys, timeout, lambda lst: lst.pop(0)) + + @_locked + def brpop(self, keys, timeout=0): + return self._bpop(keys, timeout, lambda lst: lst.pop()) @_locked def brpoplpush(self, src, dst, timeout=0): - return self.rpoplpush(src, dst) + return self._blocking(timeout, lambda: self.rpoplpush(src, dst)) def _get_hash(self, name): value = self._db.get(name, _Hash()) diff --git a/test_fakeredis.py b/test_fakeredis.py index 715559c..6ff43d3 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -1013,6 +1013,24 @@ def test_blpop_allow_single_key(self): self.redis.rpush('foo', 'one') self.assertEqual(self.redis.blpop('foo', timeout=1), 
(b'foo', b'one')) + @attr('slow') + def test_blpop_block(self): + def push_thread(): + sleep(0.5) + self.redis.rpush('foo', 'value1') + sleep(0.5) + # Will wake the condition variable + self.redis.set('bar', 'go back to sleep some more') + self.redis.rpush('foo', 'value2') + + thread = threading.Thread(target=push_thread) + thread.start() + try: + self.assertEqual(self.redis.blpop('foo'), (b'foo', b'value1')) + self.assertEqual(self.redis.blpop('foo', timeout=5), (b'foo', b'value2')) + finally: + thread.join() + def test_blpop_wrong_type(self): self.redis.set('foo', 'bar') with self.assertRaises(redis.ResponseError): @@ -1035,6 +1053,24 @@ def test_brpop_single_key(self): self.assertEqual(self.redis.brpop('foo', timeout=1), (b'foo', b'two')) + @attr('slow') + def test_brpop_block(self): + def push_thread(): + sleep(0.5) + self.redis.rpush('foo', 'value1') + sleep(0.5) + # Will wake the condition variable + self.redis.set('bar', 'go back to sleep some more') + self.redis.rpush('foo', 'value2') + + thread = threading.Thread(target=push_thread) + thread.start() + try: + self.assertEqual(self.redis.brpop('foo'), (b'foo', b'value1')) + self.assertEqual(self.redis.brpop('foo', timeout=5), (b'foo', b'value2')) + finally: + thread.join() + def test_brpop_wrong_type(self): self.redis.set('foo', 'bar') with self.assertRaises(redis.ResponseError): From 1fbc29ced79017f02c4f1af58df81336a4b72c8c Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 20 Sep 2018 12:22:14 +0200 Subject: [PATCH 09/30] Fix handling of FakePipeline.execute when no commands queued Closes #217. 
--- fakeredis.py | 2 ++ test_fakeredis.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/fakeredis.py b/fakeredis.py index 7ffbb0e..3563e86 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -2324,6 +2324,8 @@ def __len__(self): def execute(self, raise_on_error=True): """Run all the commands in the pipeline and return the results.""" + if not self.commands: + return [] try: if self.watching: mismatches = [ diff --git a/test_fakeredis.py b/test_fakeredis.py index 6ff43d3..fa2b2dd 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -2848,6 +2848,15 @@ def test_pipeline_length(self): p.set('baz', 'quux').get('baz') self.assertEqual(2, len(p)) + def test_pipeline_no_commands(self): + # redis-py's execute is a nop if there are no commands queued, + # so it succeeds even if watched keys have been changed. + self.redis.set('foo', '1') + p = self.redis.pipeline() + p.watch('foo') + self.redis.set('foo', '2') + self.assertEqual(p.execute(), []) + def test_key_patterns(self): self.redis.mset({'one': 1, 'two': 2, 'three': 3, 'four': 4}) self.assertItemsEqual(self.redis.keys('*o*'), From 52ff6a5f1191e6b47231aa59c679be67f7dc7cf4 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 20 Sep 2018 14:43:40 +0200 Subject: [PATCH 10/30] Rewrite _Lock based on redis.lock.Lock Unfortunately redis.lock.Lock contains bugs that prevent it being used as is (it doesn't handle decode_responses=True), so I've made a copy and fixed them. Real redis works because it uses LuaLock. --- COPYING | 29 +++++++- fakeredis.py | 183 +++++++++++++++++++++++++++++++++++++++++++--- test_fakeredis.py | 53 +++++++++++++- 3 files changed, 253 insertions(+), 12 deletions(-) diff --git a/COPYING b/COPYING index 7a2b679..f225b41 100644 --- a/COPYING +++ b/COPYING @@ -1,4 +1,4 @@ -Copyright (c) 2011 James Saryerwinnie, 2017 Bruce Merry +Copyright (c) 2011 James Saryerwinnie, 2017-2018 Bruce Merry All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -22,3 +22,30 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +This software contains portions of code from redis-py, which is distributed +under the following license: + +Copyright (c) 2012 Andy McCurdy + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/fakeredis.py b/fakeredis.py index 3563e86..51d8709 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -13,10 +13,12 @@ import types import re import functools +import uuid from itertools import count, islice import redis -from redis.exceptions import ResponseError +from redis.exceptions import ResponseError, LockError +from redis.utils import dummy import redis.client try: @@ -311,26 +313,183 @@ def _compile_pattern(pattern): return re.compile(regex, re.S) +# This is a copy of redis.lock.Lock, but with some bugs fixed. class _Lock(object): - def __init__(self, redis, name, timeout): + """ + A shared, distributed Lock. Using Redis for locking allows the Lock + to be shared across processes and/or machines. + + It's left to the user to resolve deadlock issues and make sure + multiple clients play nicely together. + """ + def __init__(self, redis, name, timeout=None, sleep=0.1, + blocking=True, blocking_timeout=None, thread_local=True): + """ + Create a new Lock instance named ``name`` using the Redis client + supplied by ``redis``. + + ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + ``timeout`` can be specified as a float or integer, both representing + the number of seconds to wait. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + ``blocking`` indicates whether calling ``acquire`` should block until + the lock has been acquired or to fail immediately, causing ``acquire`` + to return False and the lock not being acquired. Defaults to True. + Note this value can be overridden by passing a ``blocking`` + argument to ``acquire``. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. 
``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. + thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. 
+ """ self.redis = redis self.name = name - self.lock = threading.Lock() - redis.set(name, self, ex=timeout) + self.timeout = timeout + self.sleep = sleep + self.blocking = blocking + self.blocking_timeout = blocking_timeout + self.thread_local = bool(thread_local) + self.local = threading.local() if self.thread_local else dummy() + self.local.token = None + if self.timeout and self.sleep > self.timeout: + raise LockError("'sleep' must be less than 'timeout'") def __enter__(self): - self.acquire() + # force blocking, as otherwise the user would have to check whether + # the lock was actually acquired or not. + self.acquire(blocking=True) return self def __exit__(self, exc_type, exc_value, traceback): self.release() - def acquire(self, blocking=True, blocking_timeout=None): - return self.lock.acquire(blocking) + def acquire(self, blocking=None, blocking_timeout=None): + """ + Use Redis to hold a shared, distributed lock named ``name``. + Returns True once the lock is acquired. + + If ``blocking`` is False, always return immediately. If the lock + was acquired, return True, otherwise return False. + + ``blocking_timeout`` specifies the maximum number of seconds to + wait trying to acquire the lock. 
+ """ + sleep = self.sleep + token = to_bytes(uuid.uuid1().hex) + if blocking is None: + blocking = self.blocking + if blocking_timeout is None: + blocking_timeout = self.blocking_timeout + stop_trying_at = None + if blocking_timeout is not None: + stop_trying_at = time.time() + blocking_timeout + while 1: + if self.do_acquire(token): + self.local.token = token + return True + if not blocking: + return False + if stop_trying_at is not None and time.time() > stop_trying_at: + return False + time.sleep(sleep) + + def do_acquire(self, token): + if self.redis.setnx(self.name, token): + if self.timeout: + # convert to milliseconds + timeout = int(self.timeout * 1000) + self.redis.pexpire(self.name, timeout) + return True + return False def release(self): - self.lock.release() - self.redis.delete(self.name) + "Releases the already acquired lock" + expected_token = self.local.token + if expected_token is None: + raise LockError("Cannot release an unlocked lock") + self.local.token = None + self.do_release(expected_token) + + def do_release(self, expected_token): + name = self.name + + def execute_release(pipe): + lock_value = to_bytes(pipe.get(name)) + if lock_value != expected_token: + raise LockError("Cannot release a lock that's no longer owned") + pipe.multi() + pipe.delete(name) + + self.redis.transaction(execute_release, name) + + def extend(self, additional_time): + """ + Adds more time to an already acquired lock. + + ``additional_time`` can be specified as an integer or a float, both + representing the number of seconds to add. 
+ """ + if self.local.token is None: + raise LockError("Cannot extend an unlocked lock") + if self.timeout is None: + raise LockError("Cannot extend a lock with no timeout") + return self.do_extend(additional_time) + + def do_extend(self, additional_time): + pipe = self.redis.pipeline() + pipe.watch(self.name) + lock_value = to_bytes(pipe.get(self.name)) + if lock_value != self.local.token: + raise LockError("Cannot extend a lock that's no longer owned") + expiration = pipe.pttl(self.name) + if expiration is None or expiration < 0: + # Redis evicted the lock key between the previous get() and now + # we'll handle this when we call pexpire() + expiration = 0 + pipe.multi() + pipe.pexpire(self.name, expiration + int(additional_time * 1000)) + + try: + response = pipe.execute() + except redis.WatchError: + # someone else acquired the lock + raise LockError("Cannot extend a lock that's no longer owned") + if not response[0]: + # pexpire returns False if the key doesn't exist + raise LockError("Cannot extend a lock that's no longer owned") + return True def _check_conn(func): @@ -2113,7 +2272,11 @@ def transaction(self, func, *keys, **kwargs): def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, lock_class=None, thread_local=True): - return _Lock(self, name, timeout) + if lock_class is None: + lock_class = _Lock + return lock_class(self, name, timeout=timeout, sleep=sleep, + blocking_timeout=blocking_timeout, + thread_local=thread_local) @_locked def pubsub(self, ignore_subscribe_messages=False): diff --git a/test_fakeredis.py b/test_fakeredis.py index fa2b2dd..4f73f3c 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -3953,7 +3953,7 @@ def test_expire_should_not_handle_floating_point_values(self): def test_lock(self): lock = self.redis.lock('foo') - lock.acquire() + self.assertTrue(lock.acquire()) self.assertTrue(self.redis.exists('foo')) lock.release() self.assertFalse(self.redis.exists('foo')) @@ -3961,6 +3961,57 @@ def test_lock(self): 
self.assertTrue(self.redis.exists('bar')) self.assertFalse(self.redis.exists('bar')) + def test_unlock_without_lock(self): + lock = self.redis.lock('foo') + with self.assertRaises(redis.exceptions.LockError): + lock.release() + + @attr('slow') + def test_unlock_expired(self): + lock = self.redis.lock('foo', timeout=0.01, sleep=0.001) + self.assertTrue(lock.acquire()) + sleep(0.1) + with self.assertRaises(redis.exceptions.LockError): + lock.release() + + @attr('slow') + def test_lock_blocking_timeout(self): + lock = self.redis.lock('foo') + self.assertTrue(lock.acquire()) + lock2 = self.redis.lock('foo') + self.assertFalse(lock2.acquire(blocking_timeout=1)) + + def test_lock_nonblocking(self): + lock = self.redis.lock('foo') + self.assertTrue(lock.acquire()) + lock2 = self.redis.lock('foo') + self.assertFalse(lock2.acquire(blocking=False)) + + def test_lock_extend(self): + lock = self.redis.lock('foo', timeout=2) + lock.acquire() + lock.extend(3) + ttl = int(self.redis.pttl('foo')) + self.assertGreater(ttl, 4000) + self.assertLessEqual(ttl, 5000) + + def test_lock_extend_exceptions(self): + lock1 = self.redis.lock('foo', timeout=2) + with self.assertRaises(redis.exceptions.LockError): + lock1.extend(3) + lock2 = self.redis.lock('foo') + lock2.acquire() + with self.assertRaises(redis.exceptions.LockError): + lock2.extend(3) # Cannot extend a lock with no timeout + + @attr('slow') + def test_lock_extend_expired(self): + lock = self.redis.lock('foo', timeout=0.01, sleep=0.001) + lock.acquire() + sleep(0.1) + with self.assertRaises(redis.exceptions.LockError): + lock.extend(3) + class DecodeMixin(object): decode_responses = True From 93e1d3217baa6c0830264734960e40db4c416fe8 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 20 Sep 2018 15:22:18 +0200 Subject: [PATCH 11/30] Fixes for Python 2 - timedelta.total_seconds doesn't exist, so use the existing timedelta_total_seconds helper - threading.Condition.wait doesn't return an indication of timeout, so just go around 
the loop again regardless. --- fakeredis.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index 51d8709..d24148d 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -1393,11 +1393,10 @@ def _blocking(self, timeout, func): if timeout == 0: self._condition.wait() else: - wait_for = (expire - datetime.now()).total_seconds() + wait_for = timedelta_total_seconds(expire - datetime.now()) if wait_for <= 0: break - if not self._condition.wait(wait_for): - break + self._condition.wait(wait_for) # Timed out return None From 210d360b155d4409512dd2f7a90c83622b6e944f Mon Sep 17 00:00:00 2001 From: Alex Eshoo Date: Mon, 24 Sep 2018 14:46:14 -0400 Subject: [PATCH 12/30] cleaned up unittests --- test_fakeredis.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/test_fakeredis.py b/test_fakeredis.py index b26c6ef..17cd680 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -3082,17 +3082,13 @@ def _listen(pubsub, q): def test_pubsub_run_in_thread(self): q = Queue() - def _queue_msg(msg, q): - q.put(msg) - pubsub = self.redis.pubsub() - pubsub.subscribe(**{"channel": lambda e: _queue_msg(e, q)}) + pubsub.subscribe(channel=q.put) pubsub_thread = pubsub.run_in_thread() msg = b"Hello World" self.redis.publish("channel", msg) - sleep(1) retrieved = q.get() self.assertEqual(retrieved["data"], msg) @@ -3100,11 +3096,13 @@ def _queue_msg(msg, q): pubsub_thread.join() self.assertTrue(not pubsub_thread.is_alive()) - pubsub.subscribe(**{"channel": None}) + pubsub.subscribe(channel=None) with self.assertRaises(redis.exceptions.PubSubError): pubsub_thread = pubsub.run_in_thread() - pubsub.psubscribe(**{"channel": None}) + pubsub.unsubscribe("channel") + + pubsub.psubscribe(channel=None) with self.assertRaises(redis.exceptions.PubSubError): pubsub_thread = pubsub.run_in_thread() From 431dc7b1bb0ae7ad1a3ecd43ebbe0524bd69edf4 Mon Sep 17 00:00:00 2001 From: Alex Eshoo Date: Mon, 24 Sep 2018 14:50:06 -0400 Subject: 
[PATCH 13/30] imported worker thread from redis-py --- fakeredis.py | 31 ++----------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index c2f0498..0f4e0af 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -18,6 +18,7 @@ import redis from redis.exceptions import ResponseError, PubSubError import redis.client +from redis.client import PubSubWorkerThread try: # Python 2.6, 2.7 @@ -2463,34 +2464,6 @@ def run_in_thread(self, sleep_time=0, daemon=False): if handler is None: raise PubSubError("Pattern: '%s' has no handler registered" % (channel,)) - thread = FakePubSubWorkerThread(self, sleep_time, daemon=daemon) + thread = PubSubWorkerThread(self, sleep_time, daemon=daemon) thread.start() return thread - - -class FakePubSubWorkerThread(threading.Thread): - def __init__(self, pubsub, sleep_time, daemon=False): - super(FakePubSubWorkerThread, self).__init__() - self.daemon = daemon - self.pubsub = pubsub - self.sleep_time = sleep_time - self._running = False - - def run(self): - if self._running: - return - self._running = True - pubsub = self.pubsub - sleep_time = self.sleep_time - while pubsub.subscribed: - pubsub.get_message(ignore_subscribe_messages=True, - timeout=sleep_time) - pubsub.close() - self._running = False - - def stop(self): - # stopping simply unsubscribes from all channels and patterns. - # the unsubscribe responses that are generated will short circuit - # the loop in run(), calling pubsub.close() to clean up the connection - self.pubsub.unsubscribe() - self.pubsub.punsubscribe() From ed36fff38ded3d34af8bb7e9f6ffe5dd1d8071e2 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Tue, 25 Sep 2018 14:41:01 +0200 Subject: [PATCH 14/30] Add more locking unit tests Took a few tests from #216. 
--- test_fakeredis.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test_fakeredis.py b/test_fakeredis.py index 4f73f3c..712d2ac 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -3987,6 +3987,26 @@ def test_lock_nonblocking(self): lock2 = self.redis.lock('foo') self.assertFalse(lock2.acquire(blocking=False)) + def test_lock_twice(self): + lock = self.redis.lock('foo') + self.assertTrue(lock.acquire(blocking=False)) + self.assertFalse(lock.acquire(blocking=False)) + + def test_acquiring_lock_different_lock_release(self): + lock1 = self.redis.lock('foo') + lock2 = self.redis.lock('foo') + self.assertTrue(lock1.acquire(blocking=False)) + self.assertFalse(lock2.acquire(blocking=False)) + + # Test only releasing lock1 actually releases the lock + with self.assertRaises(redis.exceptions.LockError): + lock2.release() + self.assertFalse(lock2.acquire(blocking=False)) + lock1.release() + # Locking with lock2 now has the lock + self.assertTrue(lock2.acquire(blocking=False)) + self.assertFalse(lock1.acquire(blocking=False)) + def test_lock_extend(self): lock = self.redis.lock('foo', timeout=2) lock.acquire() From a4e85b8959713f78e1e893ac375d69a9f8b17abf Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Tue, 25 Sep 2018 16:02:47 +0200 Subject: [PATCH 15/30] Prepare for 0.14.0 release --- README.rst | 11 +++++++++++ fakeredis.py | 2 +- setup.py | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 38deca1..5432b25 100644 --- a/README.rst +++ b/README.rst @@ -294,6 +294,17 @@ they have all been tagged as 'slow' so you can skip them by running:: Revision history ================ +0.14.0 +------ +This release greatly improves support for threads: the bulk of commands are now +thread-safe, ``lock`` has been rewritten to more closely match redis-py, and +pubsub now supports ``run_in_thread``: + +- `#213 `_ pipeline.watch runs transaction even if no commands are queued +- `#214 `_ Added 
pubsub.run_in_thread as it is implemented in redis-py +- `#215 `_ Keep pace with redis-py for zrevrange method +- `#216 `_ Update behavior of lock to behave closer to redis lock + 0.13.1 ------ - `#208 `_ eval's KEYS and ARGV are now lua tables diff --git a/fakeredis.py b/fakeredis.py index fb193d7..4f4defe 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -32,7 +32,7 @@ PY2 = sys.version_info[0] == 2 -__version__ = '0.13.1' +__version__ = '0.14.0' if PY2: diff --git a/setup.py b/setup.py index e0051ff..2964481 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='fakeredis', - version='0.13.1', + version='0.14.0', description="Fake implementation of redis API for testing purposes.", long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(), From 2f52bcde8f19ea7a33259cec82ca33ef7b1abd34 Mon Sep 17 00:00:00 2001 From: Daniel Williams Date: Tue, 16 Oct 2018 13:13:57 -0600 Subject: [PATCH 16/30] Adding placeholders for the SAVE, BGSAVE, and LASTSAVE commands. --- fakeredis.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fakeredis.py b/fakeredis.py index 4f4defe..a1787e0 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -783,6 +783,15 @@ def persist(self, name): def ping(self): return True + def bgsave(self): + pass + + def save(self): + pass + + def lastsave(self): + pass + @_lua_reply(_lua_bool_ok) @_locked def rename(self, src, dst): From 2a76f9a66e1001ca3d838e1b8b7e694742b2f2d9 Mon Sep 17 00:00:00 2001 From: Daniel Williams Date: Tue, 16 Oct 2018 14:03:50 -0600 Subject: [PATCH 17/30] Added method bodies to new commands and tests. 
--- fakeredis.py | 9 ++++++--- test_fakeredis.py | 9 +++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index a1787e0..e62d4f5 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -541,6 +541,7 @@ def __init__(self, db=0, charset='utf-8', errors='strict', self._encoding_errors = errors self._pubsubs = [] self._decode_responses = decode_responses + self._lastsave = datetime.now() self.connected = connected _patch_responses(self, _check_conn) @@ -784,13 +785,15 @@ def ping(self): return True def bgsave(self): - pass + self._lastsave = datetime.now() + return True def save(self): - pass + self._lastsave = datetime.now() + return True def lastsave(self): - pass + return self._lastsave @_lua_reply(_lua_bool_ok) @_locked diff --git a/test_fakeredis.py b/test_fakeredis.py index a19ae78..adb814a 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -2870,6 +2870,15 @@ def test_key_patterns(self): def test_ping(self): self.assertTrue(self.redis.ping()) + def test_bgsave(self): + self.assertTrue(self.redis.bgsave()) + + def test_save(self): + self.assertTrue(self.redis.save()) + + def test_lastsave(self): + self.assertTrue(isinstance(self.redis.lastsave(), datetime)) + def test_type(self): self.redis.set('string_key', "value") self.redis.lpush("list_key", "value") From 8d69fa9ed3d2754a2a824ee2d99fabaad054aa1c Mon Sep 17 00:00:00 2001 From: Daniel Williams Date: Tue, 16 Oct 2018 14:20:23 -0600 Subject: [PATCH 18/30] Added two more tests that check the updating of the lastsave timestamp. 
--- test_fakeredis.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test_fakeredis.py b/test_fakeredis.py index adb814a..6d0d847 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -2879,6 +2879,22 @@ def test_save(self): def test_lastsave(self): self.assertTrue(isinstance(self.redis.lastsave(), datetime)) + @attr('slow') + def test_bgsave_timestamp_update(self): + early_timestamp = self.redis.lastsave() + sleep(1) + self.assertTrue(self.redis.bgsave()) + late_timestamp = self.redis.lastsave() + self.assertLess(early_timestamp, late_timestamp) + + @attr('slow') + def test_save_timestamp_update(self): + early_timestamp = self.redis.lastsave() + sleep(1) + self.assertTrue(self.redis.save()) + late_timestamp = self.redis.lastsave() + self.assertLess(early_timestamp, late_timestamp) + def test_type(self): self.redis.set('string_key', "value") self.redis.lpush("list_key", "value") From f9deeb09b6a51730ab21e8ecf4f12562ede2df9e Mon Sep 17 00:00:00 2001 From: Daniel Williams Date: Tue, 16 Oct 2018 14:30:38 -0600 Subject: [PATCH 19/30] Removing implemented commands from readme, and extending the sleep to make sure time stamps are different. 
--- README.rst | 3 --- test_fakeredis.py | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 5432b25..65e0edd 100644 --- a/README.rst +++ b/README.rst @@ -116,7 +116,6 @@ server ------ * bgrewriteaof - * bgsave * client kill * client list * client getname @@ -135,7 +134,6 @@ server * debug object * debug segfault * info - * lastsave * memory doctor * memory help * memory malloc-stats @@ -144,7 +142,6 @@ server * memory usage * monitor * role - * save * shutdown * slaveof * slowlog diff --git a/test_fakeredis.py b/test_fakeredis.py index 6d0d847..71548d7 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -2882,7 +2882,7 @@ def test_lastsave(self): @attr('slow') def test_bgsave_timestamp_update(self): early_timestamp = self.redis.lastsave() - sleep(1) + sleep(2) self.assertTrue(self.redis.bgsave()) late_timestamp = self.redis.lastsave() self.assertLess(early_timestamp, late_timestamp) @@ -2890,7 +2890,7 @@ def test_bgsave_timestamp_update(self): def test_save_timestamp_update(self): early_timestamp = self.redis.lastsave() - sleep(1) + sleep(2) self.assertTrue(self.redis.save()) late_timestamp = self.redis.lastsave() self.assertLess(early_timestamp, late_timestamp) From ec92d60f8387665e298c98612b804b563d4a1ab8 Mon Sep 17 00:00:00 2001 From: Daniel Williams Date: Tue, 16 Oct 2018 14:41:16 -0600 Subject: [PATCH 20/30] TestRealStrictRedis versions of the bgsave tests are failing. Might not be waiting long enough for the background process to finish.
--- test_fakeredis.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test_fakeredis.py b/test_fakeredis.py index 71548d7..1a08c19 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -2882,15 +2882,16 @@ def test_lastsave(self): @attr('slow') def test_bgsave_timestamp_update(self): early_timestamp = self.redis.lastsave() - sleep(2) + sleep(1) self.assertTrue(self.redis.bgsave()) + sleep(1) late_timestamp = self.redis.lastsave() self.assertLess(early_timestamp, late_timestamp) @attr('slow') def test_save_timestamp_update(self): early_timestamp = self.redis.lastsave() - sleep(2) + sleep(1) self.assertTrue(self.redis.save()) late_timestamp = self.redis.lastsave() self.assertLess(early_timestamp, late_timestamp) From 043fc642cbeb2dc37dd1a18ac2b788975f970ff6 Mon Sep 17 00:00:00 2001 From: Owen Stranathan Date: Tue, 6 Nov 2018 15:02:04 -0500 Subject: [PATCH 21/30] Import MutableMapping ABC from collections.abc per https://docs.python.org/3/whatsnew/3.7.html#id3 --- fakeredis.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fakeredis.py b/fakeredis.py index e62d4f5..e9b6296 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -4,7 +4,13 @@ import copy from ctypes import CDLL, POINTER, c_double, c_char_p, pointer from ctypes.util import find_library -from collections import MutableMapping +try: + # Python 3.8+ https://docs.python.org/3/whatsnew/3.7.html#id3 + from collections.abc import MutableMapping +except: + # Python 2.6, 2.7 + from collections import MutableMapping + from datetime import datetime, timedelta import operator import sys From 5b465806a951c6efc58d21ae73e16f742657c823 Mon Sep 17 00:00:00 2001 From: Owen Stranathan Date: Tue, 6 Nov 2018 15:28:17 -0500 Subject: [PATCH 22/30] Added py37 to tox.ini and Pipfile(.lock) --- Pipfile | 15 ++++++ Pipfile.lock | 146 +++++++++++++++++++++++++++++++++++++++++++++++++++ tox.ini | 2 +- 3 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 Pipfile create mode 100644 
Pipfile.lock diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..d6e7afc --- /dev/null +++ b/Pipfile @@ -0,0 +1,15 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + + +[dev-packages] +tox = "*" +nose = "==1.3.4" + +[packages] +"flake8" = "<3.0.0" +redis = "==2.10.6" +lupa = "==1.6" +future = "==0.16.0" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..17b66ba --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,146 @@ +{ + "_meta": { + "hash": { + "sha256": "6cb661ed3b64636951038fa4018fa1c9c5181b6b0a54d195ae848495a2aa0af1" + }, + "pipfile-spec": 6, + "requires": {}, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "flake8": { + "hashes": [ + "sha256:231cd86194aaec4bdfaa553ae1a1cd9b7b4558332fbc10136c044940d587a778", + "sha256:7ac3bbaac27174d95bc4734ed23a07de567ffbcf4fc7e316854b4f3015d4fd15" + ], + "index": "pypi", + "version": "==2.6.2" + }, + "future": { + "hashes": [ + "sha256:e39ced1ab767b5936646cedba8bcce582398233d6a627067d4c6a454c90cfedb" + ], + "index": "pypi", + "version": "==0.16.0" + }, + "lupa": { + "hashes": [ + "sha256:08e21b44f81c4853cd35e4c720f56e3ab98eebbc3f15d737e0ff49c0f198985f", + "sha256:0ca1ad5174640d6927033af7459df1d407484f799590ed13d29e274efa539f92", + "sha256:0d8f15bb46f82f28c4e9c8f74be4de4237e8ece1b2302a5d00dc50c95b52ed61", + "sha256:19f0ecf2c9a6102db7e882d1154053e967007f6fac4f399790c090388bc420ee", + "sha256:1e0c47c8203f6c78bf5ab3c9b1ee07a7131a218376e329682d4e452418f57c16", + "sha256:2d2f2af75c0ad260d7410544b08e33f37b0b5357756e81d68119fe1f7ae2e90e", + "sha256:2d69bfd31f985204866db9af10aad072e7967bc5c1aeb260330a326c1b31f133", + "sha256:5badd2f13e85af577f8c1082a7a270d15f7959cebecb32cf05ff49a55b8811ce", + "sha256:91081b05fec0a10fe77329c854fc6a79d042036b305ce62d46be2cd8133eb59c", + "sha256:a10d932ecc4caddbcc2fdac9de8db01e67f23121d432f806d9d626149634e5a0", + 
"sha256:a56a3540e1cf06c415d0f8f93afa2060ffb7ef2cebfde75f30e6a151591fdd96", + "sha256:b438cda97c1a0579816eb75fe83aca9b9a63645ca2f43082821fd912311866fe", + "sha256:b916fef92178fa29cb0752d078b4a515ad5e88b01e90b97ce6c3002d36016c70", + "sha256:db4a96b2f40558787ddf13a6b2008b5d3157f44470afcaf1491ad24ce4b4d10d", + "sha256:ded7aa0b9576b50dc8103a0a834222e80e16b1d9987c95dbe6a77bcf2b6b27d8", + "sha256:f18fd3b96c68a27ff19d2ca3c01c89928b081f37f62bead841fdcb13fdf23ed4", + "sha256:fb085b687d74103a05f54cbec25dc1d6c4b101188cbe42859b666794d2c26a67" + ], + "index": "pypi", + "version": "==1.6" + }, + "mccabe": { + "hashes": [ + "sha256:16293af41e7242031afd73896fef6458f4cad38201d21e28f344fff50ae1c25e", + "sha256:f9b58bf366c1506dcd6117b33e5c4874746f0de859c9c7cab8b516cb6be1d22e" + ], + "version": "==0.5.3" + }, + "pycodestyle": { + "hashes": [ + "sha256:2ce83f2046f5ab85c652ceceddfbde7a64a909900989b4b43e92b10b743d0ce5", + "sha256:37f0420b14630b0eaaf452978f3a6ea4816d787c3e6dcbba6fb255030adae2e7" + ], + "version": "==2.0.0" + }, + "pyflakes": { + "hashes": [ + "sha256:2e4a1b636d8809d8f0a69f341acf15b2e401a3221ede11be439911d23ce2139e", + "sha256:e87bac26c62ea5b45067cc89e4a12f56e1483f1f2cda17e7c9b375b9fd2f40da" + ], + "version": "==1.2.3" + }, + "redis": { + "hashes": [ + "sha256:8a1900a9f2a0a44ecf6e8b5eb3e967a9909dfed219ad66df094f27f7d6f330fb", + "sha256:a22ca993cea2962dbb588f9f30d0015ac4afcc45bee27d3978c0dbe9e97c6c0f" + ], + "index": "pypi", + "version": "==2.10.6" + } + }, + "develop": { + "filelock": { + "hashes": [ + "sha256:b8d5ca5ca1c815e1574aee746650ea7301de63d87935b3463d26368b76e31633", + "sha256:d610c1bb404daf85976d7a82eb2ada120f04671007266b708606565dd03b5be6" + ], + "version": "==3.0.10" + }, + "nose": { + "hashes": [ + "sha256:76bc63a4e2d5e5a0df77ca7d18f0f56e2c46cfb62b71103ba92a92c79fab1e03", + "sha256:78b03116badd0dbb2ed4dedc96a4a3d42138b94bb89fc2eb99f7f3bdfd199f56", + "sha256:cc8aebdec5a5fb989912f157f77b3c21a5e2f2da623af90a7b476b106a834abf" + ], + "index": "pypi", + "version": 
"==1.3.4" + }, + "pluggy": { + "hashes": [ + "sha256:447ba94990e8014ee25ec853339faf7b0fc8050cdc3289d4d71f7f410fb90095", + "sha256:bde19360a8ec4dfd8a20dcb811780a30998101f078fc7ded6162f0076f50508f" + ], + "version": "==0.8.0" + }, + "py": { + "hashes": [ + "sha256:bf92637198836372b520efcba9e020c330123be8ce527e535d185ed4b6f45694", + "sha256:e76826342cefe3c3d5f7e8ee4316b80d1dd8a300781612ddbc765c17ba25a6c6" + ], + "version": "==1.7.0" + }, + "six": { + "hashes": [ + "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", + "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" + ], + "version": "==1.11.0" + }, + "toml": { + "hashes": [ + "sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c", + "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e" + ], + "version": "==0.10.0" + }, + "tox": { + "hashes": [ + "sha256:513e32fdf2f9e2d583c2f248f47ba9886428c949f068ac54a0469cac55df5862", + "sha256:75fa30e8329b41b664585f5fb837e23ce1d7e6fa1f7811f2be571c990f9d911b" + ], + "index": "pypi", + "version": "==3.5.3" + }, + "virtualenv": { + "hashes": [ + "sha256:686176c23a538ecc56d27ed9d5217abd34644823d6391cbeb232f42bf722baad", + "sha256:f899fafcd92e1150f40c8215328be38ff24b519cd95357fa6e78e006c7638208" + ], + "version": "==16.1.0" + } + } +} diff --git a/tox.ini b/tox.ini index 023eee8..337120b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26,py27,py33,py34,py35,py36 +envlist = py26,py27,py33,py34,py35,py36,py3.7 [testenv] usedevelop = True From fb7b600a25799b063c9c6f3afe65ab129a285406 Mon Sep 17 00:00:00 2001 From: Owen Stranathan Date: Wed, 7 Nov 2018 08:07:18 -0500 Subject: [PATCH 23/30] Explicitly catch ImportError --- fakeredis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fakeredis.py b/fakeredis.py index e9b6296..e7ae34e 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -7,7 +7,7 @@ try: # Python 3.8+ https://docs.python.org/3/whatsnew/3.7.html#id3 from 
collections.abc import MutableMapping -except: +except ImportError: # Python 2.6, 2.7 from collections import MutableMapping From c5a23b5e8113c3c205bf7738d26817de5938c2d9 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Wed, 7 Nov 2018 19:49:38 +0200 Subject: [PATCH 24/30] No longer test that SDIFF from Lua sorts It was the case in real redis up to v4, but wasn't intended to be part of the contract (see antirez/redis#5538) --- fakeredis.py | 4 ++++ test_fakeredis.py | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index e7ae34e..a17503f 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -1093,6 +1093,10 @@ def _convert_redis_result(self, lua_runtime, result): ] return lua_runtime.table_from(converted) elif isinstance(result, set): + # Redis up to 4 (with default options) sorts sets when returning + # them to Lua, but only to make scripts deterministic for + # replication. Redis 5 no longer sorts, but we maintain the sort + # so that unit tests written against fakeredis are reproducible. converted = sorted( self._convert_redis_result(lua_runtime, item) for item in result diff --git a/test_fakeredis.py b/test_fakeredis.py index 1a08c19..03e9fa1 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -3688,8 +3688,11 @@ def test_eval_sdiff(self): return value; end ''', 2, 'foo', 'bar') - # Lua must receive the set *sorted* - self.assertEqual(val, [b'a', b'c', b'd', b'e', b'f']) + # Note: while fakeredis sorts the result when using Lua, this isn't + # actually part of the redis contract (see + # https://github.com/antirez/redis/issues/5538), and for Redis 5 we + # need to sort val to pass the test. 
+ self.assertEqual(sorted(val), [b'a', b'c', b'd', b'e', b'f']) class TestFakeRedis(unittest.TestCase): From aa6a0d670e08528b66e115818f1818892d06f745 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 8 Nov 2018 07:48:33 +0200 Subject: [PATCH 25/30] Prepare for 0.15.0 release --- README.rst | 5 +++++ fakeredis.py | 2 +- setup.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 65e0edd..43c47cb 100644 --- a/README.rst +++ b/README.rst @@ -291,6 +291,11 @@ they have all been tagged as 'slow' so you can skip them by running:: Revision history ================ +0.15.0 +------ +- `#219 `_ Add SAVE, BGSAVE and LASTSAVE commands +- `#222 `_ Fix deprecation warnings in Python 3.7 + 0.14.0 ------ This release greatly improves support for threads: the bulk of commands are now diff --git a/fakeredis.py b/fakeredis.py index a17503f..3dfe256 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -38,7 +38,7 @@ PY2 = sys.version_info[0] == 2 -__version__ = '0.14.0' +__version__ = '0.15.0' if PY2: diff --git a/setup.py b/setup.py index 2964481..4086d96 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='fakeredis', - version='0.14.0', + version='0.15.0', description="Fake implementation of redis API for testing purposes.", long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(), From 6d56cfd3346588dda33191eab823c3af16c3cb65 Mon Sep 17 00:00:00 2001 From: Viljo Viitanen Date: Thu, 8 Nov 2018 19:56:30 +0200 Subject: [PATCH 26/30] Add test for __delitem__ --- test_fakeredis.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test_fakeredis.py b/test_fakeredis.py index 03e9fa1..eea5f63 100644 --- a/test_fakeredis.py +++ b/test_fakeredis.py @@ -622,6 +622,11 @@ def test_setnx(self): self.assertEqual(self.redis.setnx('foo', 'baz'), False) self.assertEqual(self.redis.get('foo'), b'bar') + def test_del_operator(self): + self.redis['foo'] = 'bar' + del self.redis['foo'] + 
self.assertEqual(self.redis.get('foo'), None) + def test_delete(self): self.redis['foo'] = 'bar' self.assertEqual(self.redis.delete('foo'), True) From 49f6f7a85b84a466344a4c1ca342cf362367bc3e Mon Sep 17 00:00:00 2001 From: Viljo Viitanen Date: Thu, 8 Nov 2018 21:44:34 +0200 Subject: [PATCH 27/30] Add __delitem__ implementation --- fakeredis.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fakeredis.py b/fakeredis.py index 3dfe256..125a38c 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -980,6 +980,9 @@ def delete(self, *names): continue return deleted + def __delitem__(self, name): + return self.delete(name) + @_locked def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None): From 689b2310b920d44b5ad0b3f3af87fce1fe576bb7 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Wed, 28 Nov 2018 07:40:26 +0200 Subject: [PATCH 28/30] Depend on redis<3 redis-py 3 is not backwards-compatible, and fakeredis only implements the version 2 interface. Closes #226 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4086d96..0bcec60 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ 'Programming Language :: Python :: 3.7' ], install_requires=[ - 'redis', + 'redis<3', ], extras_require={ "lua": ['lupa'] From 34be573551e9ff5dd21d9e41b64a0e3dbaf5d58c Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Wed, 28 Nov 2018 07:49:16 +0200 Subject: [PATCH 29/30] Prepare for 0.16 release --- README.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/README.rst b/README.rst index 43c47cb..00a83de 100644 --- a/README.rst +++ b/README.rst @@ -16,6 +16,20 @@ many times you want to write unittests that do not talk to an external server (such as redis). This module now allows tests to simply use this module as a reasonable substitute for redis. +Note on redis-py 3 +================== + +redis-py 3 is a recent backwards-incompatible update to redis-py. 
It is not yet +supported by fakeredis, which only implements the redis-py 2 API. + +If you need to run unit tests against the redis-py 3 API, take a look at +birdisle_. It embeds the redis server code into your process and supports +redis-py 2 and 3. It is also a more accurate emulation of redis, because it +is using the actual redis codebase. The downside is that it currently only +supports Linux. + +.. _birdisle: https://birdisle.readthedocs.io/en/latest/ + How to Use ========== @@ -291,6 +305,11 @@ they have all been tagged as 'slow' so you can skip them by running:: Revision history ================ +0.16.0 +------ +- `#224 `_ Add __delitem__ +- Restrict to redis<3 + 0.15.0 ------ - `#219 `_ Add SAVE, BGSAVE and LASTSAVE commands From aed0c2fe19e58923d0b76527ad931d9ed653cd4a Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Wed, 28 Nov 2018 07:49:41 +0200 Subject: [PATCH 30/30] Bump version number to 0.16 --- fakeredis.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fakeredis.py b/fakeredis.py index 125a38c..e5491dc 100644 --- a/fakeredis.py +++ b/fakeredis.py @@ -38,7 +38,7 @@ PY2 = sys.version_info[0] == 2 -__version__ = '0.15.0' +__version__ = '0.16.0' if PY2: diff --git a/setup.py b/setup.py index 0bcec60..245dd41 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='fakeredis', - version='0.15.0', + version='0.16.0', description="Fake implementation of redis API for testing purposes.", long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),