
Commit 0147be0

gh-131525: Remove _HashedSeq wrapper from lru_cache (gh-131922)
1 parent c535a13 commit 0147be0

File tree

1 file changed: +1 -17 lines


Lib/functools.py (+1 -17)
@@ -516,22 +516,6 @@ def _unwrap_partialmethod(func):
 
 _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
 
-class _HashedSeq(list):
-    """ This class guarantees that hash() will be called no more than once
-        per element.  This is important because the lru_cache() will hash
-        the key multiple times on a cache miss.
-
-    """
-
-    __slots__ = 'hashvalue'
-
-    def __init__(self, tup, hash=hash):
-        self[:] = tup
-        self.hashvalue = hash(tup)
-
-    def __hash__(self):
-        return self.hashvalue
-
 def _make_key(args, kwds, typed,
               kwd_mark = (object(),),
               fasttypes = {int, str},
@@ -561,7 +545,7 @@ def _make_key(args, kwds, typed,
         key += tuple(type(v) for v in kwds.values())
     elif len(key) == 1 and type(key[0]) in fasttypes:
         return key[0]
-    return _HashedSeq(key)
+    return key
 
 def lru_cache(maxsize=128, typed=False):
     """Least-recently-used cache decorator.

0 commit comments
