diff --git a/src/_zkapauthorizer/tests/test_model.py b/src/_zkapauthorizer/tests/test_model.py
index d622aa45e7004117f8d29273f6c45ddb54f44412..b253a6edfc4216431882f6af9778f3361e5f61dd 100644
--- a/src/_zkapauthorizer/tests/test_model.py
+++ b/src/_zkapauthorizer/tests/test_model.py
@@ -727,31 +727,31 @@ class VoucherTests(TestCase):
         )
 
 
-def paired_tokens(data, sizes=integers(min_value=1, max_value=1000)):
+def paired_tokens(num_tokens=integers(min_value=1, max_value=1000)):
     """
-    Draw two lists of the same length, one of random tokens and one of
-    unblinded tokens.
+    Build tuples of two lists of the same length, one of random tokens and one
+    of unblinded tokens.
 
     :rtype: ([RandomTokens], [UnblindedTokens])
     """
-    num_tokens = data.draw(sizes)
-    r = data.draw(
-        lists(
-            random_tokens(),
-            min_size=num_tokens,
-            max_size=num_tokens,
-            unique=True,
-        )
-    )
-    u = data.draw(
-        lists(
-            unblinded_tokens(),
-            min_size=num_tokens,
-            max_size=num_tokens,
-            unique=True,
+
+    def pairs(num):
+        return tuples(
+            lists(
+                random_tokens(),
+                min_size=num,
+                max_size=num,
+                unique=True,
+            ),
+            lists(
+                unblinded_tokens(),
+                min_size=num,
+                max_size=num,
+                unique=True,
+            ),
         )
-    )
-    return r, u
+
+    return num_tokens.flatmap(pairs)
 
 
 class UnblindedTokenStoreTests(TestCase):
@@ -791,15 +791,15 @@ class UnblindedTokenStoreTests(TestCase):
         vouchers(),
         dummy_ristretto_keys(),
         booleans(),
-        data(),
+        paired_tokens(),
     )
     def test_unblinded_tokens_round_trip(
-        self, get_config, now, voucher_value, public_key, completed, data
+        self, get_config, now, voucher_value, public_key, completed, tokens
     ):
         """
         Unblinded tokens that are added to the store can later be retrieved and counted.
         """
-        random_tokens, unblinded_tokens = paired_tokens(data)
+        random_tokens, unblinded_tokens = tokens
         store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
         store.add(voucher_value, len(random_tokens), 0, lambda: random_tokens)
         store.insert_unblinded_tokens_for_voucher(
@@ -829,33 +829,17 @@ class UnblindedTokenStoreTests(TestCase):
         datetimes(),
         vouchers(),
         dummy_ristretto_keys(),
-        integers(min_value=1, max_value=100),
-        data(),
+        paired_tokens(),
     )
     def test_mark_vouchers_redeemed(
-        self, get_config, now, voucher_value, public_key, num_tokens, data
+        self, get_config, now, voucher_value, public_key, tokens
     ):
         """
         The voucher for unblinded tokens that are added to the store is marked
         as redeemed.
         """
-        random = data.draw(
-            lists(
-                random_tokens(),
-                min_size=num_tokens,
-                max_size=num_tokens,
-                unique=True,
-            ),
-        )
-        unblinded = data.draw(
-            lists(
-                unblinded_tokens(),
-                min_size=num_tokens,
-                max_size=num_tokens,
-                unique=True,
-            ),
-        )
-
+        random, unblinded = tokens
+        num_tokens = len(random)
         store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
         store.add(voucher_value, len(random), 0, lambda: random)
         store.insert_unblinded_tokens_for_voucher(
@@ -908,31 +892,15 @@ class UnblindedTokenStoreTests(TestCase):
         datetimes(),
         vouchers(),
         dummy_ristretto_keys(),
-        integers(min_value=1, max_value=100),
-        data(),
+        paired_tokens(),
    )
     def test_mark_spent_vouchers_double_spent(
-        self, get_config, now, voucher_value, public_key, num_tokens, data
+        self, get_config, now, voucher_value, public_key, tokens
     ):
         """
         A voucher which has already been spent cannot be marked as double-spent.
         """
-        random = data.draw(
-            lists(
-                random_tokens(),
-                min_size=num_tokens,
-                max_size=num_tokens,
-                unique=True,
-            ),
-        )
-        unblinded = data.draw(
-            lists(
-                unblinded_tokens(),
-                min_size=num_tokens,
-                max_size=num_tokens,
-                unique=True,
-            ),
-        )
+        random, unblinded = tokens
         store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
         store.add(voucher_value, len(random), 0, lambda: random)
         store.insert_unblinded_tokens_for_voucher(
@@ -959,22 +927,31 @@ class UnblindedTokenStoreTests(TestCase):
         )
 
     @given(
-        tahoe_configs(),
-        datetimes(),
-        vouchers(),
-        dummy_ristretto_keys(),
-        booleans(),
-        integers(min_value=1),
-        data(),
+        get_config=tahoe_configs(),
+        now=datetimes(),
+        voucher_value=vouchers(),
+        public_key=dummy_ristretto_keys(),
+        completed=booleans(),
+        extra_bits=integers(min_value=1, max_value=128),
+        extra_fuzz=integers(min_value=1),
+        tokens=paired_tokens(),
     )
     def test_not_enough_unblinded_tokens(
-        self, get_config, now, voucher_value, public_key, completed, extra, data
+        self,
+        get_config,
+        now,
+        voucher_value,
+        public_key,
+        completed,
+        extra_bits,
+        extra_fuzz,
+        tokens,
     ):
         """
         ``get_unblinded_tokens`` raises ``NotEnoughTokens`` if ``count`` is
         greater than the number of unblinded tokens in the store.
         """
-        random, unblinded = paired_tokens(data)
+        random, unblinded = tokens
         num_tokens = len(random)
         store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
         store.add(voucher_value, len(random), 0, lambda: random)
@@ -985,6 +962,19 @@ class UnblindedTokenStoreTests(TestCase):
             completed,
             spendable=True,
         )
+        # Compute a number of "extra" tokens to request -- tokens that won't
+        # be available -- in a way that tries to distribute that number across
+        # a very large range. We know that the implementation has a boundary
+        # around 2 ** 63 and using a simple integers() strategy for extra,
+        # Hypothesis usually tries examples on both sides of that boundary -
+        # but not always. *Most* of the numbers between 1 and 2 ** 63 are
+        # invalid in exactly the same way (as far as I know) but I'd still
+        # like to try them out a bit.
+        #
+        # Better factoring for this would probably be to have a
+        # exponential_integers() strategy... Maybe?
+        extra = 2 ** extra_bits
+        extra += extra_fuzz % extra
         self.assertThat(
             lambda: store.get_unblinded_tokens(num_tokens + extra),
             raises(NotEnoughTokens),