diff --git a/docs/source/interface.rst b/docs/source/interface.rst
index cabf775ffa7303759b286c003c14981a0c0f187d..74da5a897666f800948ed2863b44cc0f98ce9923 100644
--- a/docs/source/interface.rst
+++ b/docs/source/interface.rst
@@ -54,6 +54,7 @@ The elements of the list are objects like the one returned by issuing a **GET**
 
 This endpoint allows an external agent to retrieve unused unblinded tokens present in the node's database.
 Unblinded tokens are returned in ascending text sorted order.
+This order matches the order in which tokens will be used by the system.
 This endpoint accepts several query arguments:
 
 * limit: An integer limiting the number of unblinded tokens to retrieve.
diff --git a/src/_zkapauthorizer/model.py b/src/_zkapauthorizer/model.py
index 5fa84f84de2dc533925d94298d06b8a131b3ce87..cf61f82421c81d3c7765bad53f9d95700050bec8 100644
--- a/src/_zkapauthorizer/model.py
+++ b/src/_zkapauthorizer/model.py
@@ -314,7 +314,7 @@ class VoucherStore(object):
             """
             CREATE TEMPORARY TABLE [extracting]
             AS
-            SELECT [token] FROM [unblinded-tokens] LIMIT ?
+            SELECT [token] FROM [unblinded-tokens] ORDER BY [token] LIMIT ?
             """,
             (count,),
         )
diff --git a/src/_zkapauthorizer/tests/test_client_resource.py b/src/_zkapauthorizer/tests/test_client_resource.py
index a73b7bb8a63b19bb3e7d0822692171ed0406cd47..c456f3242be4b4fcbde254470ae52a8766af7054 100644
--- a/src/_zkapauthorizer/tests/test_client_resource.py
+++ b/src/_zkapauthorizer/tests/test_client_resource.py
@@ -44,6 +44,7 @@ from testtools import (
 from testtools.matchers import (
     MatchesStructure,
     MatchesAll,
+    MatchesPredicate,
     AllMatch,
     HasLength,
     IsInstance,
@@ -79,6 +80,11 @@ from hypothesis.strategies import (
     text,
 )
 
+from twisted.internet.defer import (
+    Deferred,
+    maybeDeferred,
+    gatherResults,
+)
 from twisted.internet.task import (
     Cooperator,
 )
@@ -353,6 +359,61 @@ class UnblindedTokenTests(TestCase):
             ),
         )
 
+    @given(tahoe_configs(), vouchers(), integers(min_value=1, max_value=100))
+    def test_get_order_matches_use_order(self, get_config, voucher, num_tokens):
+        """
+        The first unblinded token returned in a response to a **GET** request is
+        the first token to be used to authorize a storage request.
+        """
+        def after(d, f):
+            new_d = Deferred()
+            def f_and_continue(result):
+                maybeDeferred(f).chainDeferred(new_d)
+                return result
+            d.addCallback(f_and_continue)
+            return new_d
+
+        def get_tokens():
+            d = agent.request(
+                b"GET",
+                b"http://127.0.0.1/unblinded-token",
+            )
+            d.addCallback(readBody)
+            d.addCallback(
+                lambda body: loads(body)[u"unblinded-tokens"],
+            )
+            return d
+
+        def use_a_token():
+            root.store.extract_unblinded_tokens(1)
+
+        tempdir = self.useFixture(TempDir())
+        config = get_config(tempdir.join(b"tahoe"), b"tub.port")
+        root = root_from_config(config)
+
+        # Put in a number of tokens with which to test.
+        redeeming = root.controller.redeem(voucher, num_tokens)
+        # Make sure the operation completed before proceeding.
+        self.assertThat(
+            redeeming,
+            succeeded(Always()),
+        )
+
+        agent = RequestTraversalAgent(root)
+        getting_initial_tokens = get_tokens()
+        using_a_token = after(getting_initial_tokens, use_a_token)
+        getting_tokens_after = after(using_a_token, get_tokens)
+
+        self.assertThat(
+            gatherResults([getting_initial_tokens, getting_tokens_after]),
+            succeeded(
+                MatchesPredicate(
+                    lambda (initial_tokens, tokens_after): initial_tokens[1:] == tokens_after,
+                    u"initial, after (%s): initial[1:] != after",
+                ),
+            ),
+        )
+
 
 def succeeded_with_unblinded_tokens_with_matcher(all_token_count, match_unblinded_tokens):
     """
diff --git a/src/_zkapauthorizer/tests/test_model.py b/src/_zkapauthorizer/tests/test_model.py
index 14fd2f0a7339fd264ff6ba5e492f55adad6a8b20..282e3c35ca5fe8b5569a52aa33c32bb73947e541 100644
--- a/src/_zkapauthorizer/tests/test_model.py
+++ b/src/_zkapauthorizer/tests/test_model.py
@@ -276,7 +276,7 @@ class UnblindedTokenStoreTests(TestCase):
         )
         store.insert_unblinded_tokens_for_voucher(voucher_value, tokens)
         retrieved_tokens = store.extract_unblinded_tokens(len(tokens))
-        self.expectThat(tokens, Equals(retrieved_tokens))
+        self.expectThat(tokens, AfterPreprocessing(sorted, Equals(retrieved_tokens)))
 
         # After extraction, the unblinded tokens are no longer available.
         more_unblinded_tokens = store.extract_unblinded_tokens(1)
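
Illustrative sketch (not part of the patch): the model.py change adds ORDER BY [token] so extraction pulls the lexicographically smallest tokens first, which is the same ascending text order the GET endpoint reports. The snippet below mimics that behavior against a stand-alone SQLite table; the bare [unblinded-tokens] schema and the extract_unblinded_tokens helper here are assumptions for illustration, not the project's actual code (which stages rows through a temporary [extracting] table).

# Minimal sketch, assuming a plain [unblinded-tokens] table; the temporary
# [extracting] table used by the real store is omitted for brevity.
import sqlite3

def extract_unblinded_tokens(db, count):
    # Mirror the patched query: take the `count` smallest tokens in
    # ascending text order, matching the order shown by the GET endpoint.
    rows = db.execute(
        "SELECT [token] FROM [unblinded-tokens] ORDER BY [token] LIMIT ?",
        (count,),
    ).fetchall()
    tokens = [token for (token,) in rows]
    # Extracted tokens are removed so they cannot be handed out twice.
    db.executemany(
        "DELETE FROM [unblinded-tokens] WHERE [token] = ?",
        [(token,) for token in tokens],
    )
    return tokens

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE [unblinded-tokens] ([token] TEXT PRIMARY KEY)")
db.executemany(
    "INSERT INTO [unblinded-tokens] VALUES (?)",
    [("token-b",), ("token-a",), ("token-c",)],
)
# The first extracted token is the first one a GET would have listed.
assert extract_unblinded_tokens(db, 1) == ["token-a"]
assert extract_unblinded_tokens(db, 2) == ["token-b", "token-c"]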