author | Determinant <[email protected]> | 2022-11-17 18:08:59 -0800
committer | Determinant <[email protected]> | 2022-11-17 18:08:59 -0800
commit | 8154806fe2fccacdc3dafaa68181a07bcf8d6c4c (patch)
tree | f477e6a005599bb88c18db142c267b9297c6060b /frozen_deps/mnemonic
parent | be4dc086591c9bced04a507d127c83811c5700c4 (diff)
v0.1.7
Diffstat (limited to 'frozen_deps/mnemonic')
-rw-r--r-- | frozen_deps/mnemonic/__init__.py | 4
-rw-r--r-- | frozen_deps/mnemonic/mnemonic.py | 119
-rw-r--r-- | frozen_deps/mnemonic/py.typed | 1
-rw-r--r-- | frozen_deps/mnemonic/wordlist/french.txt | 2
4 files changed, 61 insertions, 65 deletions
diff --git a/frozen_deps/mnemonic/__init__.py b/frozen_deps/mnemonic/__init__.py
index 47e293d..f8c9e1f 100644
--- a/frozen_deps/mnemonic/__init__.py
+++ b/frozen_deps/mnemonic/__init__.py
@@ -1 +1,3 @@
-from .mnemonic import Mnemonic  # noqa: F401
+from .mnemonic import Mnemonic
+
+__all__ = ["Mnemonic"]
diff --git a/frozen_deps/mnemonic/mnemonic.py b/frozen_deps/mnemonic/mnemonic.py
index 935620a..e45111b 100644
--- a/frozen_deps/mnemonic/mnemonic.py
+++ b/frozen_deps/mnemonic/mnemonic.py
@@ -20,15 +20,15 @@
 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #
 
-import binascii
 import bisect
 import hashlib
 import hmac
 import itertools
 import os
-import sys
+from typing import AnyStr, List, Optional, Sequence, TypeVar, Union
 import unicodedata
 
+_T = TypeVar("_T")
 PBKDF2_ROUNDS = 2048
 
 
@@ -37,20 +37,23 @@ class ConfigurationError(Exception):
 
 
 # From <https://stackoverflow.com/questions/212358/binary-search-bisection-in-python/2233940#2233940>
-def binary_search(a, x, lo=0, hi=None):  # can't use a to specify default for hi
+def binary_search(
+    a: Sequence[_T],
+    x: _T,
+    lo: int = 0,
+    hi: Optional[int] = None,  # can't use a to specify default for hi
+) -> int:
     hi = hi if hi is not None else len(a)  # hi defaults to len(a)
     pos = bisect.bisect_left(a, x, lo, hi)  # find insertion position
     return pos if pos != hi and a[pos] == x else -1  # don't walk off the end
 
 
 # Refactored code segments from <https://github.com/keis/base58>
-def b58encode(v):
+def b58encode(v: bytes) -> str:
     alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
 
     p, acc = 1, 0
     for c in reversed(v):
-        if sys.version < "3":
-            c = ord(c)
         acc += p * c
         p = p << 8
 
@@ -62,39 +65,36 @@ class Mnemonic(object):
-    def __init__(self, language):
+    def __init__(self, language: str):
+        self.language = language
         self.radix = 2048
-        if sys.version < "3":
-            with open("%s/%s.txt" % (self._get_directory(), language), "r") as f:
-                self.wordlist = [w.strip().decode("utf8") for w in f.readlines()]
-        else:
-            with open(
-                "%s/%s.txt" % (self._get_directory(), language), "r", encoding="utf-8"
-            ) as f:
-                self.wordlist = [w.strip() for w in f.readlines()]
+        with open(
+            "%s/%s.txt" % (self._get_directory(), language), "r", encoding="utf-8"
+        ) as f:
+            self.wordlist = [w.strip() for w in f.readlines()]
         if len(self.wordlist) != self.radix:
             raise ConfigurationError(
                 "Wordlist should contain %d words, but it contains %d words."
                 % (self.radix, len(self.wordlist))
             )
 
-    @classmethod
-    def _get_directory(cls):
+    @staticmethod
+    def _get_directory() -> str:
         return os.path.join(os.path.dirname(__file__), "wordlist")
 
     @classmethod
-    def list_languages(cls):
+    def list_languages(cls) -> List[str]:
         return [
             f.split(".")[0]
             for f in os.listdir(cls._get_directory())
             if f.endswith(".txt")
         ]
 
-    @classmethod
-    def normalize_string(cls, txt):
-        if isinstance(txt, str if sys.version < "3" else bytes):
+    @staticmethod
+    def normalize_string(txt: AnyStr) -> str:
+        if isinstance(txt, bytes):
             utxt = txt.decode("utf8")
-        elif isinstance(txt, unicode if sys.version < "3" else str):  # noqa: F821
+        elif isinstance(txt, str):
             utxt = txt
         else:
             raise TypeError("String value expected")
@@ -102,7 +102,7 @@ class Mnemonic(object):
         return unicodedata.normalize("NFKD", utxt)
 
     @classmethod
-    def detect_language(cls, code):
+    def detect_language(cls, code: str) -> str:
         code = cls.normalize_string(code)
         first = code.split(" ")[0]
         languages = cls.list_languages()
@@ -114,7 +114,7 @@
 
         raise ConfigurationError("Language not detected")
 
-    def generate(self, strength=128):
+    def generate(self, strength: int = 128) -> str:
         if strength not in [128, 160, 192, 224, 256]:
             raise ValueError(
                 "Strength should be one of the following [128, 160, 192, 224, 256], but it is not (%d)."
@@ -123,7 +123,7 @@
         return self.to_mnemonic(os.urandom(strength // 8))
 
     # Adapted from <http://tinyurl.com/oxmn476>
-    def to_entropy(self, words):
+    def to_entropy(self, words: Union[List[str], str]) -> bytearray:
         if not isinstance(words, list):
             words = words.split(" ")
         if len(words) not in [12, 15, 18, 21, 24]:
@@ -136,7 +136,7 @@
         concatLenBits = len(words) * 11
         concatBits = [False] * concatLenBits
         wordindex = 0
-        if self.detect_language(" ".join(words)) == "english":
+        if self.language == "english":
             use_binary_search = True
         else:
             use_binary_search = False
@@ -163,28 +163,18 @@
                     entropy[ii] |= 1 << (7 - jj)
         # Take the digest of the entropy.
         hashBytes = hashlib.sha256(entropy).digest()
-        if sys.version < "3":
-            hashBits = list(
-                itertools.chain.from_iterable(
-                    (
-                        [ord(c) & (1 << (7 - i)) != 0 for i in range(8)]
-                        for c in hashBytes
-                    )
-                )
-            )
-        else:
-            hashBits = list(
-                itertools.chain.from_iterable(
-                    ([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)
-                )
+        hashBits = list(
+            itertools.chain.from_iterable(
+                [c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes
             )
+        )
         # Check all the checksum bits.
         for i in range(checksumLengthBits):
             if concatBits[entropyLengthBits + i] != hashBits[i]:
                 raise ValueError("Failed checksum.")
         return entropy
 
-    def to_mnemonic(self, data):
+    def to_mnemonic(self, data: bytes) -> str:
         if len(data) not in [16, 20, 24, 28, 32]:
             raise ValueError(
                 "Data length should be one of the following: [16, 20, 24, 28, 32], but it is not (%d)."
@@ -192,39 +182,39 @@ class Mnemonic(object):
             )
         h = hashlib.sha256(data).hexdigest()
         b = (
-            bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8)
+            bin(int.from_bytes(data, byteorder="big"))[2:].zfill(len(data) * 8)
             + bin(int(h, 16))[2:].zfill(256)[: len(data) * 8 // 32]
         )
         result = []
         for i in range(len(b) // 11):
             idx = int(b[i * 11 : (i + 1) * 11], 2)
             result.append(self.wordlist[idx])
-        if (
-            self.detect_language(" ".join(result)) == "japanese"
-        ):  # Japanese must be joined by ideographic space.
+        if self.language == "japanese":  # Japanese must be joined by ideographic space.
             result_phrase = u"\u3000".join(result)
         else:
             result_phrase = " ".join(result)
         return result_phrase
 
-    def check(self, mnemonic):
-        mnemonic = self.normalize_string(mnemonic).split(" ")
+    def check(self, mnemonic: str) -> bool:
+        mnemonic_list = self.normalize_string(mnemonic).split(" ")
         # list of valid mnemonic lengths
-        if len(mnemonic) not in [12, 15, 18, 21, 24]:
+        if len(mnemonic_list) not in [12, 15, 18, 21, 24]:
             return False
         try:
-            idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
+            idx = map(
+                lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic_list
+            )
             b = "".join(idx)
         except ValueError:
             return False
         l = len(b)  # noqa: E741
         d = b[: l // 33 * 32]
         h = b[-l // 33 :]
-        nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip("L").zfill(l // 33 * 8))
+        nd = int(d, 2).to_bytes(l // 33 * 4, byteorder="big")
        nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[: l // 33]
         return h == nh
 
-    def expand_word(self, prefix):
+    def expand_word(self, prefix: str) -> str:
         if prefix in self.wordlist:
             return prefix
         else:
@@ -236,21 +226,23 @@
                 # this is not a validation routine, just return the input
                 return prefix
 
-    def expand(self, mnemonic):
+    def expand(self, mnemonic: str) -> str:
         return " ".join(map(self.expand_word, mnemonic.split(" ")))
 
     @classmethod
-    def to_seed(cls, mnemonic, passphrase=""):
+    def to_seed(cls, mnemonic: str, passphrase: str = "") -> bytes:
         mnemonic = cls.normalize_string(mnemonic)
         passphrase = cls.normalize_string(passphrase)
         passphrase = "mnemonic" + passphrase
-        mnemonic = mnemonic.encode("utf-8")
-        passphrase = passphrase.encode("utf-8")
-        stretched = hashlib.pbkdf2_hmac("sha512", mnemonic, passphrase, PBKDF2_ROUNDS)
+        mnemonic_bytes = mnemonic.encode("utf-8")
+        passphrase_bytes = passphrase.encode("utf-8")
+        stretched = hashlib.pbkdf2_hmac(
+            "sha512", mnemonic_bytes, passphrase_bytes, PBKDF2_ROUNDS
+        )
         return stretched[:64]
 
-    @classmethod
-    def to_hd_master_key(cls, seed):
+    @staticmethod
+    def to_hd_master_key(seed: bytes, testnet: bool = False) -> str:
         if len(seed) != 64:
             raise ValueError("Provided seed should have length of 64")
 
@@ -259,6 +251,8 @@ class Mnemonic(object):
         # Serialization format can be found at: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format
         xprv = b"\x04\x88\xad\xe4"  # Version for private mainnet
+        if testnet:
+            xprv = b"\x04\x35\x83\x94"  # Version for private testnet
         xprv += b"\x00" * 9  # Depth, parent fingerprint, and child number
         xprv += seed[32:]  # Chain code
         xprv += b"\x00" + seed[:32]  # Master key
 
@@ -274,15 +268,14 @@ class Mnemonic(object):
         return b58encode(xprv)
 
 
-def main():
-    import binascii
+def main() -> None:
     import sys
 
     if len(sys.argv) > 1:
-        data = sys.argv[1]
+        hex_data = sys.argv[1]
     else:
-        data = sys.stdin.readline().strip()
-    data = binascii.unhexlify(data)
+        hex_data = sys.stdin.readline().strip()
+    data = bytes.fromhex(hex_data)
     m = Mnemonic("english")
     print(m.to_mnemonic(data))
 
diff --git a/frozen_deps/mnemonic/py.typed b/frozen_deps/mnemonic/py.typed
new file mode 100644
index 0000000..1242d43
--- /dev/null
+++ b/frozen_deps/mnemonic/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
diff --git a/frozen_deps/mnemonic/wordlist/french.txt b/frozen_deps/mnemonic/wordlist/french.txt
index 8600949..bcf7942 100644
--- a/frozen_deps/mnemonic/wordlist/french.txt
+++ b/frozen_deps/mnemonic/wordlist/french.txt
@@ -1,4 +1,4 @@
-abaisser
+abaisser
 abandon
 abdiquer
 abeille
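For orientation, here is a minimal usage sketch of the vendored package as it reads after this bump. It only exercises signatures visible in the diff above (`generate`, `check`, `to_seed`, and the new `testnet` flag on `to_hd_master_key`); the assumption that `frozen_deps` is already on `sys.path` is specific to this example, not something the commit itself sets up.

```python
# Illustrative only -- not part of the commit. Assumes the frozen_deps
# directory has been added to sys.path so that `mnemonic` resolves to the
# vendored copy shown in this diff.
from mnemonic import Mnemonic

m = Mnemonic("english")

words = m.generate(strength=128)   # 12-word BIP-39 phrase from os.urandom
assert m.check(words)              # round-trips through the wordlist checksum

seed = Mnemonic.to_seed(words, passphrase="")          # 64-byte PBKDF2-HMAC-SHA512 seed
xprv = Mnemonic.to_hd_master_key(seed)                 # BIP-32 master key (mainnet prefix)
tprv = Mnemonic.to_hd_master_key(seed, testnet=True)   # testnet prefix, new in this version

print(words)
print(seed.hex())
print(xprv)
print(tprv)
```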
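The diff also swaps the removed `binascii` conversions for the built-in `int.from_bytes`, `int.to_bytes`, and `bytes.fromhex`. A quick, illustrative check (again not part of the commit) that the old and new spellings agree on Python 3:

```python
# Illustrative only: compares a removed binascii expression from to_mnemonic()
# with the replacement introduced in this diff, on a fixed 16-byte test value.
import binascii

data = bytes.fromhex("deadbeef00112233445566778899aabb")

old_bits = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8)
new_bits = bin(int.from_bytes(data, byteorder="big"))[2:].zfill(len(data) * 8)
assert old_bits == new_bits

# main() used to call binascii.unhexlify on its hex argument; bytes.fromhex
# produces the same bytes.
assert bytes.fromhex("00ff10") == binascii.unhexlify("00ff10")
```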