Skip to content

Commit

Permalink
Merge pull request #1842 from opentensor/warmfix/change-decoder-cacheing
Browse files Browse the repository at this point in the history
Warmfix/change decoder caching
  • Loading branch information
thewhaleking committed May 6, 2024
2 parents 3b47287 + 3def86b commit 7dd819d
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions bittensor/subtensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,18 +92,18 @@

T = TypeVar("T")


#######
# Monkey patch in caching the get_decoder_class method
# Monkey patch in caching the convert_type_string method
#######
if hasattr(RuntimeConfiguration, "get_decoder_class"):
original_get_decoder_class = RuntimeConfiguration.get_decoder_class
if hasattr(RuntimeConfiguration, "convert_type_string"):
original_convert_type_string = RuntimeConfiguration.convert_type_string

@functools.lru_cache(maxsize=None)
def cached_get_decoder_class(self, type_string):
return original_get_decoder_class(self, type_string)
def convert_type_string(cls, name):
return original_convert_type_string(cls, name)

RuntimeConfiguration.convert_type_string = convert_type_string

RuntimeConfiguration.get_decoder_class = cached_get_decoder_class

#######

Expand Down

0 comments on commit 7dd819d

Please sign in to comment.