diff --git a/src/_zkapauthorizer/_plugin.py b/src/_zkapauthorizer/_plugin.py
index e65395564042e697efeb392904d9a931cc46a510..fbc01e94679391a6135be649a5acb0a7e8333ea2 100644
--- a/src/_zkapauthorizer/_plugin.py
+++ b/src/_zkapauthorizer/_plugin.py
@@ -175,10 +175,10 @@ class ZKAPAuthorizer(object):
         redeemer = self._get_redeemer(node_config, announcement, reactor)
         store = self._get_store(node_config)
         # XXX Need to ensure one of these per store
-        controller = SpendingController(
-            store.extract_unblinded_tokens,
-            redeemer.tokens_to_passes,
-        )
+        controller = SpendingController.for_store(
+            tokens_to_passes=redeemer.tokens_to_passes,
+            store=store,
+        )
         get_passes = controller.get
         return ZKAPAuthorizerStorageClient(
             get_configured_pass_value(node_config),
diff --git a/src/_zkapauthorizer/model.py b/src/_zkapauthorizer/model.py
index 8b4a70810f97e4f26358d22b53650842b1e7977a..e472bd8ac0b0562dedec3126d9b201690e5d7fd7 100644
--- a/src/_zkapauthorizer/model.py
+++ b/src/_zkapauthorizer/model.py
@@ -17,6 +17,9 @@ This module implements models (in the MVC sense) for the client side of
 the storage plugin.
 """
 
+from uuid import (
+    uuid4,
+)
 from functools import (
     wraps,
 )
@@ -144,14 +147,30 @@ def open_and_initialize(path, connect=None):
         actual_version = get_schema_version(cursor)
         schema_upgrades = list(get_schema_upgrades(actual_version))
         run_schema_upgrades(schema_upgrades, cursor)
+
+    conn.execute("""
+    -- It might already exist if there is still another connection to this
+    -- database.  It goes away once all connections have been closed, though.
+    CREATE TABLE IF NOT EXISTS [temp.in-use] (
+        [unblinded-token] text, -- The base64 encoded unblinded token.
+        [operation-id] text,    -- A unique identifier for a group of tokens in-use together.
+
+        PRIMARY KEY([unblinded-token]),
+        FOREIGN KEY([unblinded-token]) REFERENCES [unblinded-tokens]([token])
+    )
+    """)
+
     return conn
 
 
+
 def with_cursor(f):
     @wraps(f)
     def with_cursor(self, *a, **kw):
         with self._connection:
-            return f(self, self._connection.cursor(), *a, **kw)
+            cursor = self._connection.cursor()
+            cursor.execute("BEGIN IMMEDIATE TRANSACTION")
+            return f(self, cursor, *a, **kw)
     return with_cursor
 
 
@@ -162,6 +181,11 @@ def memory_connect(path, *a, **kw):
     return _connect(":memory:", *a, **kw)
 
 
+# The largest integer SQLite3 can represent in an integer column.  Larger than
+# this and the representation loses precision as a floating point.
+_SQLITE3_INTEGER_MAX = 2 ** 63 - 1
+
+
 @attr.s(frozen=True)
 class VoucherStore(object):
     """
@@ -260,7 +284,6 @@ class VoucherStore(object):
         if not isinstance(now, datetime):
             raise TypeError("{} returned {}, expected datetime".format(self.now, now))
 
-        cursor.execute("BEGIN IMMEDIATE TRANSACTION")
         cursor.execute(
             """
             SELECT [text]
@@ -306,7 +329,6 @@ class VoucherStore(object):
                     in tokens
                 ),
             )
-        cursor.connection.commit()
         return tokens
 
     @with_cursor
@@ -446,6 +468,163 @@ class VoucherStore(object):
                     ),
                 )
 
+    @with_cursor
+    def get_unblinded_tokens(self, cursor, count):
+        """
+        Get some unblinded tokens.
+
+        These tokens are not removed from the store but they will not be
+        returned from a future call to ``get_unblinded_tokens`` *on this
+        ``VoucherStore`` instance* unless ``reset_unblinded_tokens`` is used
+        to reset their state.
+
+        If the underlying storage is accessed via another ``VoucherStore``
+        instance then the behavior of this method will be as if all tokens
+        which have not had their state changed to invalid or spent have been
+        reset.
+
+        :return list[UnblindedToken]: The unblinded tokens.
+        """
+        if count > _SQLITE3_INTEGER_MAX:
+            # An unreasonable number of tokens and also large enough to
+            # provoke undesirable behavior from the database.
+            raise NotEnoughTokens()
+
+        operation_id = unicode(uuid4())
+        cursor.execute(
+            """
+            INSERT INTO [temp.in-use]
+            SELECT [token], ?
+            FROM [unblinded-tokens]
+            WHERE [token] NOT IN (SELECT [unblinded-token] FROM [temp.in-use])
+            LIMIT ?
+            """,
+            (operation_id, count),
+        )
+        if cursor.rowcount < count:
+            raise NotEnoughTokens()
+
+        cursor.execute(
+            """
+            SELECT [unblinded-token] FROM [temp.in-use] WHERE [operation-id] = ?
+            """,
+            (operation_id,),
+        )
+        texts = cursor.fetchall()
+        return list(
+            UnblindedToken(t)
+            for (t,)
+            in texts
+        )
+
+    @with_cursor
+    def discard_unblinded_tokens(self, cursor, unblinded_tokens):
+        """
+        Get rid of some unblinded tokens.  The tokens will be completely removed
+        from the system.  This is useful when the tokens have been
+        successfully spent.
+
+        :param list[UnblindedToken] unblinded_tokens: The tokens to discard.
+
+        :return: ``None``
+        """
+        cursor.execute(
+            """
+            CREATE TEMPORARY TABLE [to-discard] (
+              [unblinded-token] text
+            )
+        """)
+        cursor.executemany(
+            """
+            INSERT INTO [to-discard] VALUES (?)
+            """,
+            list((token.unblinded_token,) for token in unblinded_tokens),
+        )
+        cursor.execute(
+            """
+            DELETE FROM [temp.in-use]
+            WHERE [unblinded-token] IN (SELECT [unblinded-token] FROM [to-discard])
+            """,
+        )
+        cursor.execute(
+            """
+            DELETE FROM [unblinded-tokens]
+            WHERE [token] IN (SELECT [unblinded-token] FROM [to-discard])
+            """,
+        )
+        cursor.execute(
+            """
+            DROP TABLE [to-discard]
+            """,
+        )
+
+    @with_cursor
+    def invalidate_unblinded_tokens(self, cursor, reason, unblinded_tokens):
+        """
+        Mark some unblinded tokens as invalid and unusable.  Some record of the
+        tokens may be retained for future inspection.  These tokens will not
+        be returned by any future ``get_unblinded_tokens`` call.  This is
+        useful when an attempt to spend a token has met with rejection by the
+        validator.
+
+        :param list[UnblindedToken] unblinded_tokens: The tokens to mark.
+
+        :return: ``None``
+        """
+        cursor.executemany(
+            """
+            INSERT INTO [invalid-unblinded-tokens] VALUES (?, ?)
+            """,
+            list(
+                (token.unblinded_token, reason)
+                for token
+                in unblinded_tokens
+            ),
+        )
+        cursor.execute(
+            """
+            DELETE FROM [temp.in-use]
+            WHERE [unblinded-token] IN (SELECT [token] FROM [invalid-unblinded-tokens])
+            """,
+        )
+        cursor.execute(
+            """
+            DELETE FROM [unblinded-tokens]
+            WHERE [token] IN (SELECT [token] FROM [invalid-unblinded-tokens])
+            """,
+        )
+
+    @with_cursor
+    def reset_unblinded_tokens(self, cursor, unblinded_tokens):
+        """
+        Make some unblinded tokens available to be retrieved from the store again.
+        This is useful if a spending operation has failed with a transient
+        error.
+        """
+        cursor.execute(
+            """
+            CREATE TEMPORARY TABLE [to-reset] (
+              [unblinded-token] text
+            )
+            """,
+        )
+        cursor.executemany(
+            """
+            INSERT INTO [to-reset] VALUES (?)
+            """,
+            list((token.unblinded_token,) for token in unblinded_tokens),
+        )
+        cursor.execute(
+            """
+            DELETE FROM [temp.in-use]
+            WHERE [unblinded-token] IN (SELECT [unblinded-token] FROM [to-reset])
+            """,
+        )
+        cursor.execute(
+            """
+            DROP TABLE [to-reset]
+            """,
+        )
 
     @with_cursor
     def extract_unblinded_tokens(self, cursor, count):
diff --git a/src/_zkapauthorizer/schema.py b/src/_zkapauthorizer/schema.py
index a23d3373c9a230d874710183e046d9e9cef954e6..5044153e08c31a211d8bcaf35dbb2efffea46626 100644
--- a/src/_zkapauthorizer/schema.py
+++ b/src/_zkapauthorizer/schema.py
@@ -156,4 +156,15 @@ _UPGRADES = {
         ALTER TABLE [vouchers] ADD COLUMN [expected-tokens] integer NOT NULL DEFAULT 32768
         """,
     ],
+
+    4: [
+        """
+        CREATE TABLE [invalid-unblinded-tokens] (
+            [token] text,  -- The base64 encoded unblinded token.
+            [reason] text, -- The reason given for it being considered invalid.
+
+            PRIMARY KEY([token])
+        )
+        """,
+    ],
 }
diff --git a/src/_zkapauthorizer/spending.py b/src/_zkapauthorizer/spending.py
index a2836e18f90eec1e13ff8c32edf311e25b8dad82..78fd7263eab769fe1585446e9b7af465d769b8f6 100644
--- a/src/_zkapauthorizer/spending.py
+++ b/src/_zkapauthorizer/spending.py
@@ -114,35 +114,51 @@ class PassGroup(object):
     """
     _message = attr.ib()
     _factory = attr.ib()
-    passes = attr.ib()
+    _tokens = attr.ib()
+
+    @property
+    def passes(self):
+        return list(
+            pass_
+            for (unblinded_token, pass_)
+            in self._tokens
+        )
+
+    @property
+    def unblinded_tokens(self):
+        return list(
+            unblinded_token
+            for (unblinded_token, pass_)
+            in self._tokens
+        )
 
     def split(self, select_indices):
         selected = []
         unselected = []
-        for idx, p in enumerate(self.passes):
+        for idx, t in enumerate(self._tokens):
             if idx in select_indices:
-                selected.append(p)
+                selected.append(t)
             else:
-                unselected.append(p)
+                unselected.append(t)
         return (
-            attr.evolve(self, passes=selected),
-            attr.evolve(self, passes=unselected),
+            attr.evolve(self, tokens=selected),
+            attr.evolve(self, tokens=unselected),
         )
 
     def expand(self, by_amount):
         return attr.evolve(
             self,
-            passes=self.passes + self._factory.get(self._message, by_amount).passes,
+            tokens=self._tokens + self._factory.get(self._message, by_amount)._tokens,
         )
 
     def mark_spent(self):
-        self._factory._mark_spent(self.passes)
+        self._factory._mark_spent(self.unblinded_tokens)
 
     def mark_invalid(self, reason):
-        self._factory._mark_invalid(reason, self.passes)
+        self._factory._mark_invalid(reason, self.unblinded_tokens)
 
     def reset(self):
-        self._factory._reset(self.passes)
+        self._factory._reset(self.unblinded_tokens)
 
 
 @implementer(IPassFactory)
@@ -152,26 +168,37 @@ class SpendingController(object):
     A ``SpendingController`` gives out ZKAPs and arranges for re-spend
     attempts when necessary.
     """
-    extract_unblinded_tokens = attr.ib()
+    get_unblinded_tokens = attr.ib()
+    discard_unblinded_tokens = attr.ib()
+    invalidate_unblinded_tokens = attr.ib()
+    reset_unblinded_tokens = attr.ib()
+
     tokens_to_passes = attr.ib()
 
+    @classmethod
+    def for_store(cls, tokens_to_passes, store):
+        return cls(
+            get_unblinded_tokens=store.get_unblinded_tokens,
+            discard_unblinded_tokens=store.discard_unblinded_tokens,
+            invalidate_unblinded_tokens=store.invalidate_unblinded_tokens,
+            reset_unblinded_tokens=store.reset_unblinded_tokens,
+            tokens_to_passes=tokens_to_passes,
+        )
+
     def get(self, message, num_passes):
-        unblinded_tokens = self.extract_unblinded_tokens(num_passes)
+        unblinded_tokens = self.get_unblinded_tokens(num_passes)
         passes = self.tokens_to_passes(message, unblinded_tokens)
         GET_PASSES.log(
             message=message,
             count=num_passes,
         )
-        return PassGroup(message, self, passes)
+        return PassGroup(message, self, zip(unblinded_tokens, passes))
 
-    def _mark_spent(self, group):
-        # TODO
-        pass
+    def _mark_spent(self, unblinded_tokens):
+        self.discard_unblinded_tokens(unblinded_tokens)
 
-    def _mark_invalid(self, reason, group):
-        # TODO
-        pass
+    def _mark_invalid(self, reason, unblinded_tokens):
+        self.invalidate_unblinded_tokens(reason, unblinded_tokens)
 
-    def _reset(self, group):
-        # TODO
-        pass
+    def _reset(self, unblinded_tokens):
+        self.reset_unblinded_tokens(unblinded_tokens)
diff --git a/src/_zkapauthorizer/tests/__init__.py b/src/_zkapauthorizer/tests/__init__.py
index 0f9529a87a1b9837c7b34798450815918ee5a9f5..102647a022c45553eb27ea9da5dfd0e433a11941 100644
--- a/src/_zkapauthorizer/tests/__init__.py
+++ b/src/_zkapauthorizer/tests/__init__.py
@@ -57,6 +57,12 @@ def _configure_hypothesis():
     settings.register_profile(
         "big",
         max_examples=10000,
+        # The only rule-based state machine we have now is quite simple and
+        # can probably be completely explored in about 5 steps.  Give it some
+        # headroom beyond that in case I'm wrong but don't let it run to the
+        # full 50 because, combined with searching for 10000 successful
+        # examples this makes the stateful test take *ages* to complete.
+        stateful_step_count=15,
         **base
     )
 
diff --git a/src/_zkapauthorizer/tests/fixtures.py b/src/_zkapauthorizer/tests/fixtures.py
index eb64887b6798e2d2b164e289bf095b5777276f66..00be5b25283194c4a9454d6fa5314a6695b60650 100644
--- a/src/_zkapauthorizer/tests/fixtures.py
+++ b/src/_zkapauthorizer/tests/fixtures.py
@@ -37,8 +37,12 @@ from allmydata.storage.server import (
 
 from ..model import (
     VoucherStore,
+    open_and_initialize,
     memory_connect,
 )
+from ..controller import (
+    PaymentController,
+)
 
 class AnonymousStorageServer(Fixture):
     """
@@ -82,3 +86,45 @@ class TemporaryVoucherStore(Fixture):
             self.get_now,
             memory_connect,
         )
+
+
+@attr.s
+class ConfiglessMemoryVoucherStore(Fixture):
+    """
+    Create a ``VoucherStore`` backed by an in-memory database and with no
+    associated Tahoe-LAFS configuration or node.
+
+    This is like ``TemporaryVoucherStore`` but faster because it skips the
+    Tahoe-LAFS parts.
+    """
+    redeemer = attr.ib()
+    get_now = attr.ib()
+
+    def _setUp(self):
+        here = FilePath(u".")
+        self.store = VoucherStore(
+            pass_value=2 ** 15,
+            database_path=here,
+            now=self.get_now,
+            connection=open_and_initialize(here, memory_connect),
+        )
+
+    def redeem(self, voucher, num_passes):
+        """
+        Redeem a voucher for some passes.
+
+        :return: A ``Deferred`` that fires with the redemption result.
+        """
+        return PaymentController(
+            self.store,
+            self.redeemer,
+            # Have to pass it here or to redeem, doesn't matter which.
+            default_token_count=num_passes,
+            # No value in splitting it into smaller groups in this case.
+            # Doing so only complicates the test by imposing a different
+            # minimum token count requirement (can't have fewer tokens
+            # than groups).
+            num_redemption_groups=1,
+        ).redeem(
+            voucher,
+        )
diff --git a/src/_zkapauthorizer/tests/storage_common.py b/src/_zkapauthorizer/tests/storage_common.py
index ca2915f963d7c16a66e979db984a89c1524aa518..32ef040d24e652320e3b4eaee69c5f13b2c99b91 100644
--- a/src/_zkapauthorizer/tests/storage_common.py
+++ b/src/_zkapauthorizer/tests/storage_common.py
@@ -186,7 +186,7 @@ def get_passes(message, count, signing_key):
 
     :param int count: The number of passes to get.
 
-    :param SigningKEy signing_key: The key to use to sign the passes.
+    :param SigningKey signing_key: The key to use to sign the passes.
 
     :return list[Pass]: ``count`` new random passes signed with the given key
         and bound to the given message.
@@ -268,7 +268,7 @@ class _PassFactory(object):
         passes.extend(self._get_passes(message, num_passes))
         self.issued.update(passes)
         self.in_use.update(passes)
-        return PassGroup(message, self, passes)
+        return PassGroup(message, self, zip(passes, passes))
 
     def _mark_spent(self, passes):
         for p in passes:
diff --git a/src/_zkapauthorizer/tests/test_client_resource.py b/src/_zkapauthorizer/tests/test_client_resource.py
index 7aabbdb359b3ad9a70644cfd806520884d84fdb3..9ff7ffb7f1e246ff8b5093a64661b2af291d5f8a 100644
--- a/src/_zkapauthorizer/tests/test_client_resource.py
+++ b/src/_zkapauthorizer/tests/test_client_resource.py
@@ -523,7 +523,9 @@ class UnblindedTokenTests(TestCase):
             return d
 
         def use_a_token():
-            root.store.extract_unblinded_tokens(1)
+            root.store.discard_unblinded_tokens(
+                root.store.get_unblinded_tokens(1),
+            )
 
         tempdir = self.useFixture(TempDir())
         config = get_config(tempdir.join(b"tahoe"), b"tub.port")
diff --git a/src/_zkapauthorizer/tests/test_model.py b/src/_zkapauthorizer/tests/test_model.py
index e13856f349176a47cd1f4347cc1f471d38a66945..c0eebaed864284f98cee79ef4c8bd89342de6340 100644
--- a/src/_zkapauthorizer/tests/test_model.py
+++ b/src/_zkapauthorizer/tests/test_model.py
@@ -28,6 +28,7 @@ from errno import (
     EACCES,
 )
 from datetime import (
+    datetime,
     timedelta,
 )
 
@@ -39,6 +40,8 @@ from testtools import (
     TestCase,
 )
 from testtools.matchers import (
+    Always,
+    HasLength,
     AfterPreprocessing,
     MatchesStructure,
     MatchesAll,
@@ -46,15 +49,26 @@ from testtools.matchers import (
     Raises,
     IsInstance,
 )
+from testtools.twistedsupport import (
+    succeeded,
+)
 
 from fixtures import (
     TempDir,
 )
 
 from hypothesis import (
+    note,
     given,
+    assume,
+)
+from hypothesis.stateful import (
+    RuleBasedStateMachine,
+    rule,
+    precondition,
+    invariant,
+    run_state_machine_as_test,
 )
-
 from hypothesis.strategies import (
     data,
     booleans,
@@ -63,6 +77,7 @@ from hypothesis.strategies import (
     datetimes,
     timedeltas,
     integers,
+    randoms,
 )
 
 from twisted.python.runtime import (
@@ -80,7 +95,9 @@ from ..model import (
     LeaseMaintenanceActivity,
     memory_connect,
 )
-
+from ..controller import (
+    DummyRedeemer,
+)
 from .strategies import (
     tahoe_configs,
     vouchers,
@@ -90,9 +107,11 @@ from .strategies import (
     unblinded_tokens,
     posix_safe_datetimes,
     dummy_ristretto_keys,
+    pass_counts,
 )
 from .fixtures import (
     TemporaryVoucherStore,
+    ConfiglessMemoryVoucherStore,
 )
 from .matchers import (
     raises,
@@ -314,7 +333,7 @@ class VoucherStoreTests(TestCase):
     def test_spend_order_equals_backup_order(self, get_config, voucher_value, public_key, now, data):
         """
         Unblinded tokens returned by ``VoucherStore.backup`` appear in the same
-        order as they are returned ``VoucherStore.extract_unblinded_tokens``.
+        order as they are returned by ``VoucherStore.get_unblinded_tokens``.
         """
         backed_up_tokens, spent_tokens, inserted_tokens = self._spend_order_test(
             get_config,
@@ -332,7 +351,7 @@ class VoucherStoreTests(TestCase):
     @given(tahoe_configs(), vouchers(), dummy_ristretto_keys(), datetimes(), data())
     def test_spend_order_equals_insert_order(self, get_config, voucher_value, public_key, now, data):
         """
-        Unblinded tokens returned by ``VoucherStore.extract_unblinded_tokens``
+        Unblinded tokens returned by ``VoucherStore.get_unblinded_tokens``
         appear in the same order as they were inserted.
         """
         backed_up_tokens, spent_tokens, inserted_tokens = self._spend_order_test(
@@ -386,7 +405,7 @@ class VoucherStoreTests(TestCase):
             extracted_tokens.extend(
                 token.unblinded_token
                 for token
-                in store.extract_unblinded_tokens(to_spend)
+                in store.get_unblinded_tokens(to_spend)
             )
             tokens_remaining -= to_spend
 
@@ -397,6 +416,183 @@ class VoucherStoreTests(TestCase):
         )
 
 
+class UnblindedTokenStateMachine(RuleBasedStateMachine):
+    """
+    Transition rules for a state machine corresponding to the state of
+    unblinded tokens in a ``VoucherStore`` - usable, in-use, spent, invalid,
+    etc.
+    """
+    def __init__(self, case):
+        super(UnblindedTokenStateMachine, self).__init__()
+        self.case = case
+        self.redeemer = DummyRedeemer()
+        self.configless = ConfiglessMemoryVoucherStore(
+            self.redeemer,
+            # Time probably not actually relevant to this state machine.
+            datetime.now,
+        )
+        self.configless.setUp()
+
+        self.available = 0
+        self.using = []
+        self.spent = []
+        self.invalid = []
+
+    def teardown(self):
+        self.configless.cleanUp()
+
+    @rule(voucher=vouchers(), num_passes=pass_counts())
+    def redeem_voucher(self, voucher, num_passes):
+        """
+        A voucher can be redeemed, adding more unblinded tokens to the store.
+        """
+        try:
+            self.configless.store.get(voucher)
+        except KeyError:
+            pass
+        else:
+            # Cannot redeem a voucher more than once.  We redeemed this one
+            # already.
+            assume(False)
+
+        self.case.assertThat(
+            self.configless.redeem(voucher, num_passes),
+            succeeded(Always()),
+        )
+        self.available += num_passes
+
+    @rule(num_passes=pass_counts())
+    def get_passes(self, num_passes):
+        """
+        Some passes can be requested from the store.  The resulting passes are not
+        in use, spent, or invalid.
+        """
+        assume(num_passes <= self.available)
+        tokens = self.configless.store.get_unblinded_tokens(num_passes)
+        note("get_passes: {}".format(tokens))
+
+        # No tokens we are currently using may be returned again.  Nor may
+        # tokens which have reached a terminal state of spent or invalid.
+        unavailable = set(self.using) | set(self.spent) | set(self.invalid)
+
+        self.case.assertThat(
+            tokens,
+            MatchesAll(
+                HasLength(num_passes),
+                AfterPreprocessing(
+                    lambda t: set(t) & unavailable,
+                    Equals(set()),
+                ),
+            ),
+        )
+        self.using.extend(tokens)
+        self.available -= num_passes
+
+    @rule(excess_passes=pass_counts())
+    def not_enough_passes(self, excess_passes):
+        """
+        If an attempt is made to get more passes than are available,
+        ``get_unblinded_tokens`` raises ``NotEnoughTokens``.
+        """
+        self.case.assertThat(
+            lambda: self.configless.store.get_unblinded_tokens(
+                self.available + excess_passes,
+            ),
+            raises(NotEnoughTokens),
+        )
+
+    @precondition(lambda self: len(self.using) > 0)
+    @rule(random=randoms(), data=data())
+    def spend_passes(self, random, data):
+        """
+        Some in-use passes can be discarded.
+        """
+        self.using, to_spend = random_slice(self.using, random, data)
+        note("spend_passes: {}".format(to_spend))
+        self.configless.store.discard_unblinded_tokens(to_spend)
+
+    @precondition(lambda self: len(self.using) > 0)
+    @rule(random=randoms(), data=data())
+    def reset_passes(self, random, data):
+        """
+        Some in-use passes can be returned to not-in-use state.
+        """
+        self.using, to_reset = random_slice(self.using, random, data)
+        note("reset_passes: {}".format(to_reset))
+        self.configless.store.reset_unblinded_tokens(to_reset)
+        self.available += len(to_reset)
+
+    @precondition(lambda self: len(self.using) > 0)
+    @rule(random=randoms(), data=data())
+    def invalidate_passes(self, random, data):
+        """
+        Some in-use passes are unusable and should be set aside.
+        """
+        self.using, to_invalidate = random_slice(self.using, random, data)
+        note("invalidate_passes: {}".format(to_invalidate))
+        self.configless.store.invalidate_unblinded_tokens(
+            u"reason",
+            to_invalidate,
+        )
+        self.invalid.extend(to_invalidate)
+
+    @rule()
+    def discard_ephemeral_state(self):
+        """
+        Reset all state that cannot outlive a single process, simulating a
+        restart.
+
+        XXX We have to reach into the guts of ``VoucherStore`` to do this
+        because we're using an in-memory database.  We can't just open a new
+        ``VoucherStore``. :/ Perhaps we should use an on-disk database...  Or
+        maybe this is a good argument for using an explicitly attached
+        temporary database instead of the built-in ``temp`` database.
+        """
+        with self.configless.store._connection:
+            self.configless.store._connection.execute(
+                """
+                DELETE FROM [temp.in-use]
+                """,
+            )
+        self.available += len(self.using)
+        del self.using[:]
+
+    @invariant()
+    def report_state(self):
+        note("available={} using={} invalid={} spent={}".format(
+            self.available,
+            len(self.using),
+            len(self.invalid),
+            len(self.spent),
+        ))
+
+
+
+def random_slice(taken_from, random, data):
+    """
+    Divide ``taken_from`` into two pieces with elements randomly assigned to
+    one piece or the other.
+
+    :param list taken_from: A list of elements to divide.  This will be
+        mutated.
+
+    :param random: A ``random`` module-alike.
+
+    :param data: A Hypothesis data object for drawing values.
+
+    :return: A two-tuple of the two resulting lists.
+    """
+    count = data.draw(integers(min_value=1, max_value=len(taken_from)))
+    random.shuffle(taken_from)
+    remaining = taken_from[:-count]
+    sliced = taken_from[-count:]
+    return remaining, sliced
+
+
+class UnblindedTokenStateTests(TestCase):
+    def test_states(self):
+        run_state_machine_as_test(lambda: UnblindedTokenStateMachine(self))
+
 
 class LeaseMaintenanceTests(TestCase):
     """
@@ -552,19 +748,13 @@ class UnblindedTokenStoreTests(TestCase):
         store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
         store.add(voucher_value, len(random_tokens), 0, lambda: random_tokens)
         store.insert_unblinded_tokens_for_voucher(voucher_value, public_key, unblinded_tokens, completed)
-        retrieved_tokens = store.extract_unblinded_tokens(len(random_tokens))
+        retrieved_tokens = store.get_unblinded_tokens(len(random_tokens))
 
         self.expectThat(
             set(unblinded_tokens),
             Equals(set(retrieved_tokens)),
         )
 
-        # After extraction, the unblinded tokens are no longer available.
-        self.assertThat(
-            lambda: store.extract_unblinded_tokens(1),
-            raises(NotEnoughTokens),
-        )
-
     @given(
         tahoe_configs(),
         datetimes(),
@@ -698,7 +888,7 @@ class UnblindedTokenStoreTests(TestCase):
     )
     def test_not_enough_unblinded_tokens(self, get_config, now, voucher_value, public_key, completed, num_tokens, extra, data):
         """
-        ``extract_unblinded_tokens`` raises ``NotEnoughTokens`` if ``count`` is
+        ``get_unblinded_tokens`` raises ``NotEnoughTokens`` if ``count`` is
         greater than the number of unblinded tokens in the store.
         """
         random = data.draw(
@@ -722,13 +912,11 @@ class UnblindedTokenStoreTests(TestCase):
         store.insert_unblinded_tokens_for_voucher(voucher_value, public_key, unblinded, completed)
 
         self.assertThat(
-            lambda: store.extract_unblinded_tokens(num_tokens + extra),
+            lambda: store.get_unblinded_tokens(num_tokens + extra),
             raises(NotEnoughTokens),
         )
 
 
-    # TODO: Other error states and transient states
-
 
 def store_for_test(testcase, get_config, get_now):
     """
diff --git a/src/_zkapauthorizer/tests/test_plugin.py b/src/_zkapauthorizer/tests/test_plugin.py
index da72930e2a9e497beed3a0f4e143a36603e896a6..ce04c94e3bc9fbae49b38e1f7c5d0d3f1e1adaa2 100644
--- a/src/_zkapauthorizer/tests/test_plugin.py
+++ b/src/_zkapauthorizer/tests/test_plugin.py
@@ -415,7 +415,7 @@ class ClientPluginTests(TestCase):
         size=sizes(),
     )
     @capture_logging(lambda self, logger: logger.validate())
-    def test_unblinded_tokens_extracted(
+    def test_unblinded_tokens_spent(
             self,
             logger,
             get_config,
@@ -430,7 +430,7 @@ class ClientPluginTests(TestCase):
     ):
         """
         The ``ZKAPAuthorizerStorageServer`` returned by ``get_storage_client``
-        extracts unblinded tokens from the plugin database.
+        spends unblinded tokens from the plugin database.
         """
         tempdir = self.useFixture(TempDir())
         node_config = get_config(
@@ -476,7 +476,7 @@ class ClientPluginTests(TestCase):
 
         # There should be no unblinded tokens left to extract.
         self.assertThat(
-            lambda: store.extract_unblinded_tokens(1),
+            lambda: store.get_unblinded_tokens(1),
             raises(NotEnoughTokens),
         )
 
diff --git a/src/_zkapauthorizer/tests/test_spending.py b/src/_zkapauthorizer/tests/test_spending.py
index 62473bb8f288117a16189598b22fd457b611ccfb..e55f289a3936a709566101f2effe35fecb2855dc 100644
--- a/src/_zkapauthorizer/tests/test_spending.py
+++ b/src/_zkapauthorizer/tests/test_spending.py
@@ -21,9 +21,11 @@ from testtools import (
 )
 from testtools.matchers import (
     Always,
+    Equals,
     MatchesAll,
     MatchesStructure,
     HasLength,
+    AfterPreprocessing,
 )
 from testtools.twistedsupport import (
     succeeded,
@@ -32,9 +34,10 @@ from testtools.twistedsupport import (
 from hypothesis import (
     given,
 )
-
-from twisted.python.filepath import (
-    FilePath,
+from hypothesis.strategies import (
+    integers,
+    randoms,
+    data,
 )
 
 from .strategies import (
@@ -45,14 +48,11 @@ from .strategies import (
 from .matchers import (
     Provides,
 )
-from ..model import (
-    VoucherStore,
-    open_and_initialize,
-    memory_connect,
+from .fixtures import (
+    ConfiglessMemoryVoucherStore,
 )
 from ..controller import (
     DummyRedeemer,
-    PaymentController,
 )
 from ..spending import (
     IPassGroup,
@@ -69,35 +69,21 @@ class PassGroupTests(TestCase):
         ``IPassFactory.get`` returns an ``IPassGroup`` provider containing the
         requested number of passes.
         """
-        redeemer = DummyRedeemer()
-        here = FilePath(u".")
-        store = VoucherStore(
-            pass_value=2 ** 15,
-            database_path=here,
-            now=lambda: now,
-            connection=open_and_initialize(here, memory_connect),
+        configless = self.useFixture(
+            ConfiglessMemoryVoucherStore(
+                DummyRedeemer(),
+                lambda: now,
+            ),
         )
         # Make sure there are enough tokens for us to extract!
         self.assertThat(
-            PaymentController(
-                store,
-                redeemer,
-                # Have to pass it here or to redeem, doesn't matter which.
-                default_token_count=num_passes,
-                # No value in splitting it into smaller groups in this case.
-                # Doing so only complicates the test by imposing a different
-                # minimum token count requirement (can't have fewer tokens
-                # than groups).
-                num_redemption_groups=1,
-            ).redeem(
-                voucher,
-            ),
+            configless.redeem(voucher, num_passes),
             succeeded(Always()),
         )
 
-        pass_factory = SpendingController(
-            extract_unblinded_tokens=store.extract_unblinded_tokens,
-            tokens_to_passes=redeemer.tokens_to_passes,
+        pass_factory = SpendingController.for_store(
+            tokens_to_passes=configless.redeemer.tokens_to_passes,
+            store=configless.store,
         )
 
         group = pass_factory.get(u"message", num_passes)
@@ -110,3 +96,116 @@ class PassGroupTests(TestCase):
                 ),
             ),
         )
+
+    def _test_token_group_operation(
+            self,
+            operation,
+            matches_tokens,
+            voucher,
+            num_passes,
+            now,
+            random,
+            data,
+    ):
+        configless = self.useFixture(
+            ConfiglessMemoryVoucherStore(
+                DummyRedeemer(),
+                lambda: now,
+            ),
+        )
+        # Make sure there are enough tokens for us to use!
+        self.assertThat(
+            configless.redeem(voucher, num_passes),
+            succeeded(Always()),
+        )
+
+        # Figure out some subset, maybe empty, of passes from the group that
+        # we will try to operate on.
+        group_size = data.draw(integers(min_value=0, max_value=num_passes))
+        indices = list(range(num_passes))
+        random.shuffle(indices)
+        spent_indices = indices[:group_size]
+
+        # Get some passes and perform the operation.
+        pass_factory = SpendingController.for_store(
+            tokens_to_passes=configless.redeemer.tokens_to_passes,
+            store=configless.store,
+        )
+        group = pass_factory.get(u"message", num_passes)
+        spent, rest = group.split(spent_indices)
+        operation(spent)
+
+        # Verify the expected outcome of the operation using the supplied
+        # matcher factory.
+        self.assertThat(
+            configless.store,
+            matches_tokens(num_passes, spent),
+        )
+
+    @given(vouchers(), pass_counts(), posix_safe_datetimes(), randoms(), data())
+    def test_spent(self, voucher, num_passes, now, random, data):
+        """
+        Passes in a group can be marked as successfully spent to prevent them from
+        being re-used by a future ``get`` call.
+        """
+        def matches_tokens(num_passes, group):
+            return AfterPreprocessing(
+                # The use of `backup` here to check is questionable.  TODO:
+                # Straight-up query interface for tokens in different states.
+                lambda store: store.backup()[u"unblinded-tokens"],
+                HasLength(num_passes - len(group.passes)),
+            )
+        return self._test_token_group_operation(
+            lambda group: group.mark_spent(),
+            matches_tokens,
+            voucher,
+            num_passes,
+            now,
+            random,
+            data,
+        )
+
+    @given(vouchers(), pass_counts(), posix_safe_datetimes(), randoms(), data())
+    def test_invalid(self, voucher, num_passes, now, random, data):
+        """
+        Passes in a group can be marked as invalid to prevent them from being
+        re-used by a future ``get`` call.
+        """
+        def matches_tokens(num_passes, group):
+            return AfterPreprocessing(
+                # The use of `backup` here to check is questionable.  TODO:
+                # Straight-up query interface for tokens in different states.
+                lambda store: store.backup()[u"unblinded-tokens"],
+                HasLength(num_passes - len(group.passes)),
+            )
+        return self._test_token_group_operation(
+            lambda group: group.mark_invalid(u"reason"),
+            matches_tokens,
+            voucher,
+            num_passes,
+            now,
+            random,
+            data,
+        )
+
+    @given(vouchers(), pass_counts(), posix_safe_datetimes(), randoms(), data())
+    def test_reset(self, voucher, num_passes, now, random, data):
+        """
+        Passes in a group can be reset to allow them to be re-used by a future
+        ``get`` call.
+        """
+        def matches_tokens(num_passes, group):
+            return AfterPreprocessing(
+                # They've been reset so we should be able to re-get them.
+                lambda store: store.get_unblinded_tokens(len(group.passes)),
+                Equals(group.unblinded_tokens),
+            )
+        return self._test_token_group_operation(
+            lambda group: group.reset(),
+            matches_tokens,
+            voucher,
+            num_passes,
+            now,
+            random,
+            data,
+        )