Merge pull request #5758 from mbarkhau/master
Mnemonic performance improvements
This commit is contained in:
@@ -89,20 +89,29 @@ def normalize_text(seed: str) -> str:
|
|||||||
seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
|
seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
|
||||||
return seed
|
return seed
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level cache mapping wordlist file path -> tuple of words.
# Avoids re-reading and re-parsing the wordlist from disk every time a
# Mnemonic instance is created (the performance fix this change is about).
_WORDLIST_CACHE = {}


def load_wordlist(filename):
    """Load a BIP39-style wordlist bundled with the application.

    The file is read once, parsed, and memoized in ``_WORDLIST_CACHE``;
    subsequent calls for the same file return the cached tuple.

    Args:
        filename: base name of the wordlist file (e.g. ``'english.txt'``),
            resolved relative to the app's ``wordlist`` resource directory
            via the project-local ``resource_path`` helper.

    Returns:
        A tuple of words (immutable, so the shared cache entry cannot be
        mutated by callers).
    """
    path = resource_path('wordlist', filename)
    if path not in _WORDLIST_CACHE:
        with open(path, 'r', encoding='utf-8') as f:
            s = f.read().strip()
        # Normalize to NFKD so lookups match NFKD-normalized user input.
        s = unicodedata.normalize('NFKD', s)
        lines = s.split('\n')
        wordlist = []
        for line in lines:
            # Strip trailing '#' comments and surrounding whitespace/CR.
            line = line.split('#')[0]
            line = line.strip(' \r')
            assert ' ' not in line
            if line:
                wordlist.append(line)
        # wordlists shouldn't be mutated, but just in case,
        # convert it to a tuple
        _WORDLIST_CACHE[path] = tuple(wordlist)
    return _WORDLIST_CACHE[path]
filenames = {
|
filenames = {
|
||||||
@@ -114,8 +123,6 @@ filenames = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
# FIXME every time we instantiate this class, we read the wordlist from disk
|
|
||||||
# and store a new copy of it in memory
|
|
||||||
class Mnemonic(Logger):
|
class Mnemonic(Logger):
|
||||||
# Seed derivation does not follow BIP39
|
# Seed derivation does not follow BIP39
|
||||||
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
|
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
|
||||||
@@ -126,6 +133,7 @@ class Mnemonic(Logger):
|
|||||||
self.logger.info(f'language {lang}')
|
self.logger.info(f'language {lang}')
|
||||||
filename = filenames.get(lang[0:2], 'english.txt')
|
filename = filenames.get(lang[0:2], 'english.txt')
|
||||||
self.wordlist = load_wordlist(filename)
|
self.wordlist = load_wordlist(filename)
|
||||||
|
self.wordlist_indexes = {w: i for i, w in enumerate(self.wordlist)}
|
||||||
self.logger.info(f"wordlist has {len(self.wordlist)} words")
|
self.logger.info(f"wordlist has {len(self.wordlist)} words")
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -156,7 +164,7 @@ class Mnemonic(Logger):
|
|||||||
i = 0
|
i = 0
|
||||||
while words:
|
while words:
|
||||||
w = words.pop()
|
w = words.pop()
|
||||||
k = self.wordlist.index(w)
|
k = self.wordlist_indexes[w]
|
||||||
i = i*n + k
|
i = i*n + k
|
||||||
return i
|
return i
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user