diff --git a/.circleci/config.yml b/.circleci/config.yml
index 7949f83138c5727bec74d23e6f8c372cc94184f4..d33849e40ed5093b194e471b335020955ffee8b6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -106,8 +106,8 @@ jobs:
             # If nixpkgs changes then potentially a lot of cached packages for
             # the base system will be invalidated so we may as well drop them
             # and make a new cache with the new packages.
-            - zkapauthorizer-nix-store-v3-{{ checksum "nixpkgs.rev" }}
-            - zkapauthorizer-nix-store-v3-
+            - zkapauthorizer-nix-store-v4-{{ checksum "nixpkgs.rev" }}
+            - zkapauthorizer-nix-store-v4-
 
       - run:
           name: "Run Test Suite"
@@ -122,7 +122,10 @@ jobs:
             #
             # Further, we want the "doc" output built as well because that's
             # where the coverage data ends up.
-            nix-build --argstr hypothesisProfile ci --arg collectCoverage true --attr doc
+            #
+            # Also limit the number of concurrent jobs because of resource
+            # constraints on CircleCI. :/
+            nix-build --cores 1 --max-jobs 1 --argstr hypothesisProfile ci --arg collectCoverage true --attr doc
 
       - run:
           name: "Cache codecov"
@@ -136,15 +139,14 @@ jobs:
 
       - save_cache:
           name: "Cache Nix Store Paths"
-          key: zkapauthorizer-nix-store-v3-{{ checksum "nixpkgs.rev" }}
+          key: zkapauthorizer-nix-store-v4-{{ checksum "nixpkgs.rev" }}
           paths:
             - "/nix"
 
       - run:
           name: "Report Coverage"
           command: |
-            nix-shell -p 'python.withPackages (ps: [ ps.codecov ])' --run \
-              'codecov --file ./result-doc/share/doc/*/.coverage'
+            ./.circleci/report-coverage.sh
 
 workflows:
   version: 2
diff --git a/.circleci/report-coverage.sh b/.circleci/report-coverage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3f15868363d5f8b3c1db8f1b6d591e5862efe971
--- /dev/null
+++ b/.circleci/report-coverage.sh
@@ -0,0 +1,9 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p "python.withPackages (ps: [ ps.codecov ])"
+set -x
+find ./result-doc/share/doc
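+# The test suite writes coverage data files using coverage.py's "parallel"
+# naming scheme (see parallel = True in .coveragerc) so combine them into a
+# single .coverage file before reporting.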
+cp ./result-doc/share/doc/*/.coverage.* ./
+python -m coverage combine
+python -m coverage report
+python -m coverage xml
+codecov --file coverage.xml
diff --git a/.coveragerc b/.coveragerc
index ec19cb88f397c327bac1afa6a195073f305e3e3d..4d27672886ff83d00efb47c63e2aa77e3819d1c3 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -3,4 +3,33 @@ source =
     _zkapauthorizer
     twisted.plugins.zkapauthorizer
 
+# Measuring branch coverage is slower (so the conventional wisdom goes) but
+# too bad: it's an important part of the coverage information.
 branch = True
+
+# Whether or not we actually collect coverage information in parallel, we need
+# to have the coverage data files written according to the "parallel" naming
+# scheme so that we can use "coverage combine" later to rewrite paths in the
+# coverage report.
+parallel = True
+
+omit =
+# The Versioneer version file in the repository is generated by
+# Versioneer.  Let's call it Versioneer's responsibility to ensure it
+# works and not pay attention to our test suite's coverage of it.  Also,
+# the way Versioneer works is that the source file in the repository is
+# different from the source file in an installation - which is where we
+# measure coverage.  When the source files differ like this, it's very
+# difficult to produce a coherent coverage report (measurements against
+# one source file are meaningless when looking at a different source
+# file).
+    */_zkapauthorizer/_version.py
+
+[paths]
+source =
+# It looks like this in the checkout
+    src/
+# It looks like this in the Nix build environment
+    /nix/store/*/lib/python*/site-packages/
+# It looks like this in the Windows build environment
+    C:\hostedtoolcache\windows\Python\2.7.18\x64\Lib\site-packages\
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index b64117daf8257230e49ce1c955088a44707702b1..c8995972397b0b4eee5e00b527d99897cf39050c 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -37,23 +37,19 @@ jobs:
 
     - name: "Upgrade Pip"
       run: |
-        python -m pip install --upgrade pip
+        python -m pip install -v --upgrade pip
 
     - name: "Install CI Dependencies"
       run: |
-        python -m pip install wheel coverage
-
-    - name: "Install Tahoe-LAFS master@HEAD"
-      run: |
-        python -m pip install git+https://github.com/tahoe-lafs/tahoe-lafs@master#egg=tahoe-lafs
+        python -m pip install -v wheel coverage
 
     - name: "Install Test Dependencies"
       run: |
-        python -m pip install -r test-requirements.txt
+        python -m pip install -v -r test-requirements.txt
 
     - name: "Install ZKAPAuthorizer"
       run: |
-        python -m pip install ./
+        python -m pip install -v ./
 
     - name: "Dump Python Environment"
       run: |
@@ -63,11 +59,17 @@ jobs:
       env:
         MAGIC_FOLDER_HYPOTHESIS_PROFILE: "ci"
       run: |
-        python -m coverage run -m twisted.trial _zkapauthorizer
-
+        python -m coverage run --debug=config -m twisted.trial _zkapauthorizer
 
     - name: "Convert Coverage"
       run: |
+        echo "Files:"
+        dir
+        echo "Combining"
+        coverage combine
+        echo "Reporting"
+        coverage report
+        echo "Converting to XML"
         coverage xml
 
     - uses: codecov/codecov-action@v1
diff --git a/docs/source/configuration.rst b/docs/source/configuration.rst
index cf88d3c48de1e9306be711f9260465db41508e44..b3e672fe52b76bc335481ad7adfe4e76d39cb974 100644
--- a/docs/source/configuration.rst
+++ b/docs/source/configuration.rst
@@ -35,6 +35,13 @@ For example::
 Note that ``ristretto-issuer-root-url`` must agree with whichever storage servers the client will be configured to interact with.
 If the values are not the same, the client will decline to use the storage servers.
 
+The client can also be configured with the value of a single pass::
+
+    [storageclient.plugins.privatestorageio-zkapauthz-v1]
+    pass-value = 1048576
+
+The value given here must agree with the value servers use in their configuration or the storage service will be unusable.
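+For example, with the default value of 1048576, each pass submitted entitles the client to have one mebibyte of share data stored for one lease period.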
+
 Server
 ------
 
@@ -49,6 +56,14 @@ Then also configure the Ristretto-flavored PrivacyPass issuer the server will an
   [storageserver.plugins.privatestorageio-zkapauthz-v1]
   ristretto-issuer-root-url = https://issuer.example.invalid/
 
+The value of a single pass in the system can be configured here as well::
+
+  [storageserver.plugins.privatestorageio-zkapauthz-v1]
+  pass-value = 1048576
+
+If no ``pass-value`` is given then a default will be used.
+The value given here must agree with the value clients use in their configuration or the storage service will be unusable.
+
 The storage server must also be configured with the path to the Ristretto-flavored PrivacyPass signing key.
 To avoid placing secret material in tahoe.cfg,
 this configuration is done using a path::
diff --git a/eliot.nix b/eliot.nix
deleted file mode 100644
index f6d6b3061b1ea635bac0e694be407ca8d1b6befb..0000000000000000000000000000000000000000
--- a/eliot.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-{ lib, buildPythonPackage, fetchPypi, zope_interface, pyrsistent, boltons
-, hypothesis, testtools, pytest }:
-buildPythonPackage rec {
-  pname = "eliot";
-  version = "1.7.0";
-
-  src = fetchPypi {
-    inherit pname version;
-    sha256 = "0ylyycf717s5qsrx8b9n6m38vyj2k8328lfhn8y6r31824991wv8";
-  };
-
-  postPatch = ''
-    substituteInPlace setup.py \
-      --replace "boltons >= 19.0.1" boltons
-    # depends on eliot.prettyprint._main which we don't have here.
-    rm eliot/tests/test_prettyprint.py
-  '';
-
-  checkInputs = [ testtools pytest hypothesis ];
-  propagatedBuildInputs = [ zope_interface pyrsistent boltons ];
-
-  meta = with lib; {
-    homepage = https://github.com/itamarst/eliot/;
-    description = "Logging library that tells you why it happened";
-    license = licenses.asl20;
-  };
-}
diff --git a/nix/setup.cfg.patch b/nix/setup.cfg.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c2cdeea1617d10691e9b151d1b3b1308c451952f
--- /dev/null
+++ b/nix/setup.cfg.patch
@@ -0,0 +1,13 @@
+diff --git a/setup.cfg b/setup.cfg
+index dfc49607..822ea8dd 100644
+--- a/setup.cfg
++++ b/setup.cfg
+@@ -40,7 +40,7 @@ install_requires =
+     # incompatible with Tahoe-LAFS'.  So duplicate them here (the ones that
+     # have been observed to cause problems).
+     Twisted[tls,conch]>=18.4.0
+-    tahoe-lafs==1.14.0
++    tahoe-lafs
+     treq
+ 
+ [versioneer]
diff --git a/overlays.nix b/overlays.nix
index ed2d8d1af449fcc410a0182b9d26e5b2a868e858..3d548fade895e7a390596fd56861d37ed753c5dc 100644
--- a/overlays.nix
+++ b/overlays.nix
@@ -15,12 +15,9 @@ self: super: {
       # 2 support.
       typing = python-self.callPackage ./typing.nix { };
 
-      # new tahoe-lafs dependency
-      eliot = python-self.callPackage ./eliot.nix { };
-
       # tahoe-lafs in nixpkgs is packaged as an application!  so we have to
       # re-package it ourselves as a library.
-      tahoe-lafs = python-self.callPackage ((import ./tahoe-lafs.nix) + "/nix") { };
+      tahoe-lafs = python-self.callPackage ./tahoe-lafs.nix { };
 
       # we depend on the challenge-bypass-ristretto python package, a set of
       # bindings to the challenge-bypass-ristretto Rust crate.
diff --git a/setup.cfg b/setup.cfg
index 51670eea1a8b93fa39c6191b37715f9b166e1c18..dfc4960747ba2d57bea9b12deff72f59371cd743 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,11 +33,14 @@ packages =
 install_requires =
     attrs
     zope.interface
+    eliot
     aniso8601
     python-challenge-bypass-ristretto
-    # Inherit our Twisted dependency from tahoe-lafs so we don't accidentally
-    # get the extras wrong here and break stuff.
-    tahoe-lafs
+    # The pip resolver sometimes finds treq's dependencies first and these are
+    # incompatible with Tahoe-LAFS'.  So duplicate them here (the ones that
+    # have been observed to cause problems).
+    Twisted[tls,conch]>=18.4.0
+    tahoe-lafs==1.14.0
     treq
 
 [versioneer]
diff --git a/src/_zkapauthorizer/_plugin.py b/src/_zkapauthorizer/_plugin.py
index 2ed966d8af44e357e789feb8bfc38bb2b92ef4c4..ee57951537cb0800d74412e9e8579f0a7ac279df 100644
--- a/src/_zkapauthorizer/_plugin.py
+++ b/src/_zkapauthorizer/_plugin.py
@@ -45,6 +45,11 @@ from twisted.internet.defer import (
     succeed,
 )
 
+from eliot import (
+    MessageType,
+    Field,
+)
+
 from allmydata.interfaces import (
     IFoolscapStoragePlugin,
     IAnnounceableStorageServer,
@@ -71,7 +76,10 @@ from .model import (
 from .resource import (
     from_configuration as resource_from_configuration,
 )
-
+from .storage_common import (
+    BYTES_PER_PASS,
+    get_configured_pass_value,
+)
 from .controller import (
     get_redeemer,
 )
@@ -83,6 +91,24 @@ from .lease_maintenance import (
 
 _log = Logger()
 
+PRIVACYPASS_MESSAGE = Field(
+    u"message",
+    unicode,
+    u"The PrivacyPass request-binding data associated with a pass.",
+)
+
+PASS_COUNT = Field(
+    u"count",
+    int,
+    u"A number of passes.",
+)
+
+GET_PASSES = MessageType(
+    u"zkapauthorizer:get-passes",
+    [PRIVACYPASS_MESSAGE, PASS_COUNT],
+    u"Passes are being spent.",
+)
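+
+# A logged GET_PASSES event carries these fields (alongside Eliot's standard
+# metadata such as a timestamp and task_uuid), for example:
+#
+#   {"message_type": "zkapauthorizer:get-passes",
+#    "message": "allocate_buckets <storage index>",
+#    "count": 2}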
+
 @implementer(IAnnounceableStorageServer)
 @attr.s
 class AnnounceableStorageServer(object):
@@ -134,6 +160,7 @@ class ZKAPAuthorizer(object):
     def get_storage_server(self, configuration, get_anonymous_storage_server):
         kwargs = configuration.copy()
         root_url = kwargs.pop(u"ristretto-issuer-root-url")
+        pass_value = kwargs.pop(u"pass-value", BYTES_PER_PASS)
         signing_key = SigningKey.decode_base64(
             FilePath(
                 kwargs.pop(u"ristretto-signing-key-path"),
@@ -144,7 +171,8 @@ class ZKAPAuthorizer(object):
         }
         storage_server = ZKAPAuthorizerStorageServer(
             get_anonymous_storage_server(),
-            signing_key,
+            pass_value=pass_value,
+            signing_key=signing_key,
             **kwargs
         )
         return succeed(
@@ -167,9 +195,15 @@ class ZKAPAuthorizer(object):
         extract_unblinded_tokens = self._get_store(node_config).extract_unblinded_tokens
         def get_passes(message, count):
             unblinded_tokens = extract_unblinded_tokens(count)
-            return redeemer.tokens_to_passes(message, unblinded_tokens)
+            passes = redeemer.tokens_to_passes(message, unblinded_tokens)
+            GET_PASSES.log(
+                message=message,
+                count=count,
+            )
+            return passes
 
         return ZKAPAuthorizerStorageClient(
+            get_configured_pass_value(node_config),
             get_rref,
             get_passes,
         )
diff --git a/src/_zkapauthorizer/_storage_client.py b/src/_zkapauthorizer/_storage_client.py
index a51fbfd1402c74e0f95ebddbd325e99f7ea076f4..6559b732e6a1bcd67396dea3d561162f2ce31c5c 100644
--- a/src/_zkapauthorizer/_storage_client.py
+++ b/src/_zkapauthorizer/_storage_client.py
@@ -34,7 +34,7 @@ from allmydata.interfaces import (
 )
 
 from .storage_common import (
-    BYTES_PER_PASS,
+    pass_value_attribute,
     required_passes,
     allocate_buckets_message,
     add_lease_message,
@@ -92,7 +92,7 @@ class ZKAPAuthorizerStorageClient(object):
     _expected_remote_interface_name = (
         "RIPrivacyPassAuthorizedStorageServer.tahoe.privatestorage.io"
     )
-
+    _pass_value = pass_value_attribute()
     _get_rref = attr.ib()
     _get_passes = attr.ib()
 
@@ -148,7 +148,7 @@ class ZKAPAuthorizerStorageClient(object):
             "allocate_buckets",
             self._get_encoded_passes(
                 allocate_buckets_message(storage_index),
-                required_passes(BYTES_PER_PASS, [allocated_size] * len(sharenums)),
+                required_passes(self._pass_value, [allocated_size] * len(sharenums)),
             ),
             storage_index,
             renew_secret,
@@ -179,7 +179,7 @@ class ZKAPAuthorizerStorageClient(object):
             storage_index,
             None,
         )).values()
-        num_passes = required_passes(BYTES_PER_PASS, share_sizes)
+        num_passes = required_passes(self._pass_value, share_sizes)
         # print("Adding lease to {!r} with sizes {} with {} passes".format(
         #     storage_index,
         #     share_sizes,
@@ -206,7 +206,7 @@ class ZKAPAuthorizerStorageClient(object):
             storage_index,
             None,
         )).values()
-        num_passes = required_passes(BYTES_PER_PASS, share_sizes)
+        num_passes = required_passes(self._pass_value, share_sizes)
         returnValue((
             yield self._rref.callRemote(
                 "renew_lease",
@@ -265,6 +265,7 @@ class ZKAPAuthorizerStorageClient(object):
             )
             # Determine the cost of the new storage for the operation.
             required_new_passes = get_required_new_passes_for_mutable_write(
+                self._pass_value,
                 current_sizes,
                 tw_vectors,
             )
diff --git a/src/_zkapauthorizer/_storage_server.py b/src/_zkapauthorizer/_storage_server.py
index d8c747b451bd1b95ed841ace69094b92e6b2df53..7aa17c840a705ffb15a656a8f85befda42836781 100644
--- a/src/_zkapauthorizer/_storage_server.py
+++ b/src/_zkapauthorizer/_storage_server.py
@@ -42,6 +42,7 @@ from os.path import (
 )
 from os import (
     listdir,
+    stat,
 )
 from datetime import (
     timedelta,
@@ -86,7 +87,7 @@ from .foolscap import (
     RIPrivacyPassAuthorizedStorageServer,
 )
 from .storage_common import (
-    BYTES_PER_PASS,
+    pass_value_attribute,
     required_passes,
     allocate_buckets_message,
     add_lease_message,
@@ -153,6 +154,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
     LEASE_PERIOD = timedelta(days=31)
 
     _original = attr.ib(validator=provides(RIStorageServer))
+    _pass_value = pass_value_attribute()
     _signing_key = attr.ib(validator=instance_of(SigningKey))
     _clock = attr.ib(
         validator=provides(IReactorTime),
@@ -217,7 +219,12 @@ class ZKAPAuthorizerStorageServer(Referenceable):
             allocate_buckets_message(storage_index),
             passes,
         )
-        check_pass_quantity_for_write(len(valid_passes), sharenums, allocated_size)
+        check_pass_quantity_for_write(
+            self._pass_value,
+            len(valid_passes),
+            sharenums,
+            allocated_size,
+        )
 
         return self._original.remote_allocate_buckets(
             storage_index,
@@ -243,6 +250,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
         # print("server add_lease({}, {!r})".format(len(passes), storage_index))
         valid_passes = self._validate_passes(add_lease_message(storage_index), passes)
         check_pass_quantity_for_lease(
+            self._pass_value,
             storage_index,
             valid_passes,
             self._original,
@@ -256,6 +264,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
         """
         valid_passes = self._validate_passes(renew_lease_message(storage_index), passes)
         check_pass_quantity_for_lease(
+            self._pass_value,
             storage_index,
             valid_passes,
             self._original,
@@ -324,6 +333,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
                 renew_leases = True
 
             required_new_passes = get_required_new_passes_for_mutable_write(
+                self._pass_value,
                 current_sizes,
                 tw_vectors,
             )
@@ -372,23 +382,7 @@ def has_active_lease(storage_server, storage_index, now):
     )
 
 
-def check_pass_quantity_for_lease(storage_index, valid_passes, storage_server):
-    """
-    Check that the given number of passes is sufficient to add or renew a
-    lease for one period for the given storage index.
-    """
-    allocated_sizes = dict(
-        get_share_sizes(
-            storage_server,
-            storage_index,
-            list(get_all_share_numbers(storage_server, storage_index)),
-        ),
-    ).values()
-    # print("allocated_sizes: {}".format(allocated_sizes))
-    check_pass_quantity(len(valid_passes), allocated_sizes)
-    # print("Checked out")
-
-def check_pass_quantity(valid_count, share_sizes):
+def check_pass_quantity(pass_value, valid_count, share_sizes):
     """
     Check that the given number of passes is sufficient to cover leases for
     one period for shares of the given sizes.
@@ -402,14 +396,30 @@ def check_pass_quantity(valid_count, share_sizes):
 
     :return: ``None`` if the given number of passes is sufficient.
     """
-    required_pass_count = required_passes(BYTES_PER_PASS, share_sizes)
+    required_pass_count = required_passes(pass_value, share_sizes)
     if valid_count < required_pass_count:
         raise MorePassesRequired(
             valid_count,
             required_pass_count,
         )
 
-def check_pass_quantity_for_write(valid_count, sharenums, allocated_size):
+
+def check_pass_quantity_for_lease(pass_value, storage_index, valid_passes, storage_server):
+    """
+    Check that the given number of passes is sufficient to add or renew a
+    lease for one period for the given storage index.
+    """
+    allocated_sizes = dict(
+        get_share_sizes(
+            storage_server,
+            storage_index,
+            list(get_all_share_numbers(storage_server, storage_index)),
+        ),
+    ).values()
+    check_pass_quantity(pass_value, len(valid_passes), allocated_sizes)
+
+
+def check_pass_quantity_for_write(pass_value, valid_count, sharenums, allocated_size):
     """
     Determine if the given number of valid passes is sufficient for an
     attempted write.
@@ -423,7 +433,7 @@ def check_pass_quantity_for_write(valid_count, sharenums, allocated_size):
 
     :return: ``None`` if the number of valid passes given is sufficient.
     """
-    check_pass_quantity(valid_count, [allocated_size] * len(sharenums))
+    check_pass_quantity(pass_value, valid_count, [allocated_size] * len(sharenums))
 
 
 def get_all_share_paths(storage_server, storage_index):
@@ -508,23 +518,50 @@ def get_storage_index_share_size(sharepath):
 
     :return int: The data size of the share in bytes.
     """
-    # Note Tahoe-LAFS immutable/layout.py makes some claims about how the
-    # share data is structured.  A lot of this seems to be wrong.
-    # storage/immutable.py appears to have the correct information.
-    fmt = ">LL"
+    # From src/allmydata/storage/immutable.py
+    #
+    # The share file has the following layout:
+    #  0x00: share file version number, four bytes, current version is 1
+    #  0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
+    #  0x08: number of leases, four bytes big-endian
+    #  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
+    #  A+0x0c = B: first lease. Lease format is:
+    #   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
+    #   B+0x04: renew secret, 32 bytes (SHA256)
+    #   B+0x24: cancel secret, 32 bytes (SHA256)
+    #   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
+    #   B+0x48: next lease, or end of record
+    #
+    # Footnote 1: as of Tahoe v1.3.0 this field is not used by storage
+    # servers, but it is still filled in by storage servers in case the
+    # storage server software gets downgraded from >= Tahoe v1.3.0 to < Tahoe
+    # v1.3.0, or the share file is moved from one storage server to
+    # another. The value stored in this field is truncated, so if the actual
+    # share data length is >= 2**32, then the value stored in this field will
+    # be the actual share data length modulo 2**32.
+
+    share_file_size = stat(sharepath).st_size
+    header_format = ">LLL"
+    header_size = calcsize(header_format)
     with open(sharepath, "rb") as share_file:
-        header = share_file.read(calcsize(fmt))
+        header = share_file.read(header_size)
 
-    if len(header) != calcsize(fmt):
+    if len(header) != header_size:
         raise ValueError(
             "Tried to read {} bytes of share file header, got {!r} instead.".format(
-                calcsize(fmt),
+                header_size,
                 header,
             ),
         )
 
-    version, share_data_length = unpack(fmt, header)
-    return share_data_length
+    version, _, number_of_leases = unpack(header_format, header)
+
+    if version != 1:
+        raise ValueError(
+            "Cannot interpret version {} share file.".format(version),
+        )
+
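+    # For example: a version 1 share file holding 100 bytes of share data
+    # and one lease has st_size 12 + 100 + 72 = 184, so the value computed
+    # here is 100.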
+    return share_file_size - header_size - (number_of_leases * (4 + 32 + 32 + 4))
 
 
 def get_lease_expiration(get_leases, storage_index_or_slot):
diff --git a/src/_zkapauthorizer/controller.py b/src/_zkapauthorizer/controller.py
index cdba78ec48c7c6200a4f2571903f88db0ad72949..3b0511c914144a70506a738097c9bdd36d2c16fd 100644
--- a/src/_zkapauthorizer/controller.py
+++ b/src/_zkapauthorizer/controller.py
@@ -854,10 +854,6 @@ class PaymentController(object):
             # Reload state before each iteration.  We expect it to change each time.
             voucher_obj = self.store.get(voucher)
 
-            if not voucher_obj.state.should_start_redemption():
-                # An earlier iteration may have encountered a fatal error.
-                break
-
             succeeded = yield self._perform_redeem(voucher_obj, counter, tokens)
             if not succeeded:
                 self._log.info(
diff --git a/src/_zkapauthorizer/model.py b/src/_zkapauthorizer/model.py
index 679352e8f5369c23e5bf93a1717db95e8a6e5de7..b7d590bec3e26bf7ac8f9c288ed92fad88079e62 100644
--- a/src/_zkapauthorizer/model.py
+++ b/src/_zkapauthorizer/model.py
@@ -27,9 +27,6 @@ from json import (
 from datetime import (
     datetime,
 )
-from base64 import (
-    b64decode,
-)
 from zope.interface import (
     Interface,
     implementer,
@@ -56,8 +53,15 @@ from ._base64 import (
     urlsafe_b64decode,
 )
 
+from .validators import (
+    is_base64_encoded,
+    has_length,
+    greater_than,
+)
+
 from .storage_common import (
-    BYTES_PER_PASS,
+    pass_value_attribute,
+    get_configured_pass_value,
     required_passes,
 )
 
@@ -171,6 +175,8 @@ class VoucherStore(object):
     """
     _log = Logger()
 
+    pass_value = pass_value_attribute()
+
     database_path = attr.ib(validator=attr.validators.instance_of(FilePath))
     now = attr.ib()
 
@@ -196,6 +202,7 @@ class VoucherStore(object):
             connect=connect,
         )
         return cls(
+            get_configured_pass_value(node_config),
             db_path,
             now,
             conn,
@@ -514,7 +521,7 @@ class VoucherStore(object):
 
         :return LeaseMaintenance: A new, started lease maintenance object.
         """
-        m = LeaseMaintenance(self.now, self._connection)
+        m = LeaseMaintenance(self.pass_value, self.now, self._connection)
         m.start()
         return m
 
@@ -558,6 +565,8 @@ class LeaseMaintenance(object):
     the ``observe`` and ``finish`` methods to persist state about a lease
     maintenance run.
 
+    :ivar int _pass_value: The value of a single ZKAP in byte-months.
+
     :ivar _now: A no-argument callable which returns a datetime giving a time
         to use as current.
 
@@ -568,6 +577,7 @@ class LeaseMaintenance(object):
         objects, the database row id that corresponds to the started run.
         This is used to make sure future updates go to the right row.
     """
+    _pass_value = pass_value_attribute()
     _now = attr.ib()
     _connection = attr.ib()
     _rowid = attr.ib(default=None)
@@ -594,7 +604,7 @@ class LeaseMaintenance(object):
         """
         Record a storage shares of the given sizes.
         """
-        count = required_passes(BYTES_PER_PASS, sizes)
+        count = required_passes(self._pass_value, sizes)
         cursor.execute(
             """
             UPDATE [lease-maintenance-spending]
@@ -637,46 +647,6 @@ class LeaseMaintenanceActivity(object):
 # x = store.get_latest_lease_maintenance_activity()
 # xs.started, xs.passes_required, xs.finished
 
-def is_base64_encoded(b64decode=b64decode):
-    def validate_is_base64_encoded(inst, attr, value):
-        try:
-            b64decode(value.encode("ascii"))
-        except (TypeError, Error):
-            raise TypeError(
-                "{name!r} must be base64 encoded unicode, (got {value!r})".format(
-                    name=attr.name,
-                    value=value,
-                ),
-            )
-    return validate_is_base64_encoded
-
-def has_length(expected):
-    def validate_has_length(inst, attr, value):
-        if len(value) != expected:
-            raise ValueError(
-                "{name!r} must have length {expected}, instead has length {actual}".format(
-                    name=attr.name,
-                    expected=expected,
-                    actual=len(value),
-                ),
-            )
-    return validate_has_length
-
-def greater_than(expected):
-    def validate_relation(inst, attr, value):
-        if value > expected:
-            return None
-
-        raise ValueError(
-            "{name!r} must be greater than {expected}, instead it was {actual}".format(
-                name=attr.name,
-                expected=expected,
-                actual=value,
-            ),
-        )
-    return validate_relation
-
-
 @attr.s(frozen=True)
 class UnblindedToken(object):
     """
diff --git a/src/_zkapauthorizer/storage_common.py b/src/_zkapauthorizer/storage_common.py
index 9bf9435e69e5429cf7bdf596d7e1b18fe0472da1..f00997b1c6db9b240eebd7dade935c5c6dc8e917 100644
--- a/src/_zkapauthorizer/storage_common.py
+++ b/src/_zkapauthorizer/storage_common.py
@@ -24,6 +24,12 @@ from base64 import (
     b64encode,
 )
 
+import attr
+
+from .validators import (
+    greater_than,
+)
+
 def _message_maker(label):
     def make_message(storage_index):
         return u"{label} {storage_index}".format(
@@ -41,7 +47,22 @@ slot_testv_and_readv_and_writev_message = _message_maker(u"slot_testv_and_readv_
 
 # The number of bytes we're willing to store for a lease period for each pass
 # submitted.
-BYTES_PER_PASS = 128 * 1024
+BYTES_PER_PASS = 1024 * 1024
+
+def get_configured_pass_value(node_config):
+    """
+    Determine the configuration-specified value of a single ZKAP.
+
+    If no value is explicitly configured, a default value is returned.  The
+    value is read from the **pass-value** option of the ZKAPAuthorizer plugin
+    client section.
+    """
+    section_name = u"storageclient.plugins.privatestorageio-zkapauthz-v1"
+    return int(node_config.get_config(
+        section=section_name,
+        option=u"pass-value",
+        default=BYTES_PER_PASS,
+    ))
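+
+# For example (a sketch of the intended arithmetic -- required_passes rounds
+# partial passes up):
+#
+#   required_passes(BYTES_PER_PASS, [2 ** 20 + 1]) == 2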
 
 def required_passes(bytes_per_pass, share_sizes):
     """
@@ -136,10 +157,13 @@ def get_implied_data_length(data_vector, new_length):
     return min(new_length, data_based_size)
 
 
-def get_required_new_passes_for_mutable_write(current_sizes, tw_vectors):
+def get_required_new_passes_for_mutable_write(pass_value, current_sizes, tw_vectors):
+    """
+    :param int pass_value: The value of a single pass in byte-months.
+    """
     # print("get_required_new_passes_for_mutable_write({}, {})".format(current_sizes, summarize(tw_vectors)))
     current_passes = required_passes(
-        BYTES_PER_PASS,
+        pass_value,
         current_sizes.values(),
     )
 
@@ -155,7 +179,7 @@ def get_required_new_passes_for_mutable_write(current_sizes, tw_vectors):
 
     new_sizes.update()
     new_passes = required_passes(
-        BYTES_PER_PASS,
+        pass_value,
         new_sizes.values(),
     )
     required_new_passes = new_passes - current_passes
@@ -180,3 +204,14 @@ def summarize(tw_vectors):
         for (sharenum, (test_vector, data_vectors, new_length))
         in tw_vectors.items()
     }
+
+def pass_value_attribute():
+    """
+    Define an attribute for an attrs-based object which can hold a pass value.
+    """
+    return attr.ib(
+        validator=attr.validators.and_(
+            attr.validators.instance_of((int, long)),
+            greater_than(0),
+        ),
+    )
diff --git a/src/_zkapauthorizer/tests/eliot.py b/src/_zkapauthorizer/tests/eliot.py
new file mode 100644
index 0000000000000000000000000000000000000000..710737d948cc4e069d12c265277e95dee569e133
--- /dev/null
+++ b/src/_zkapauthorizer/tests/eliot.py
@@ -0,0 +1,90 @@
+# Copyright 2019 PrivateStorage.io, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Eliot testing helpers.
+"""
+
+from __future__ import (
+    absolute_import,
+)
+
+from functools import (
+    wraps,
+)
+
+from unittest import (
+    SkipTest,
+)
+
+from eliot import (
+    MemoryLogger,
+)
+
+from eliot.testing import (
+    swap_logger,
+    check_for_errors,
+)
+
+# validate_logging and capture_logging copied from Eliot around 1.11.  We
+# can't upgrade past Eliot 1.7 because later releases require Python 3 and
+# this codebase is not yet Python 3 compatible.
+def validate_logging(assertion, *assertionArgs, **assertionKwargs):
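+    """
+    Decorator factory for test methods: the decorated test is called with a
+    ``logger`` keyword argument (a fresh ``MemoryLogger``) and, after the
+    test completes, the captured messages are checked for errors and
+    validated with ``assertion``.
+    """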
+    def decorator(function):
+        @wraps(function)
+        def wrapper(self, *args, **kwargs):
+            skipped = False
+
+            kwargs["logger"] = logger = MemoryLogger()
+            self.addCleanup(check_for_errors, logger)
+            # TestCase runs cleanups in reverse order, and we want this to
+            # run *before* tracebacks are checked:
+            if assertion is not None:
+                self.addCleanup(
+                    lambda: skipped
+                    or assertion(self, logger, *assertionArgs, **assertionKwargs)
+                )
+            try:
+                return function(self, *args, **kwargs)
+            except SkipTest:
+                skipped = True
+                raise
+
+        return wrapper
+
+    return decorator
+
+
+def capture_logging(assertion, *assertionArgs, **assertionKwargs):
+    """
+    Capture and validate all logging that doesn't specify a L{Logger}.
+
+    See L{validate_logging} for details on the rest of its behavior.
+    """
+
+    def decorator(function):
+        @validate_logging(assertion, *assertionArgs, **assertionKwargs)
+        @wraps(function)
+        def wrapper(self, *args, **kwargs):
+            logger = kwargs["logger"]
+            previous_logger = swap_logger(logger)
+
+            def cleanup():
+                swap_logger(previous_logger)
+
+            self.addCleanup(cleanup)
+            return function(self, *args, **kwargs)
+
+        return wrapper
+
+    return decorator
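+
+# Example usage (mirroring how these are used in test_plugin.py):
+#
+#   @capture_logging(lambda self, logger: logger.validate())
+#   def test_something(self, logger):
+#       ...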
diff --git a/src/_zkapauthorizer/tests/storage_common.py b/src/_zkapauthorizer/tests/storage_common.py
index 4baf4de7b89ff6ab3c0ea7145b5d0fa347f3a8e9..d00a580c29adf51f1d39583012fbe09b11555678 100644
--- a/src/_zkapauthorizer/tests/storage_common.py
+++ b/src/_zkapauthorizer/tests/storage_common.py
@@ -16,6 +16,13 @@
 ``allmydata.storage``-related helpers shared across the test suite.
 """
 
+from os import (
+    SEEK_CUR,
+)
+from struct import (
+    pack,
+)
+
 from twisted.python.filepath import (
     FilePath,
 )
@@ -25,6 +32,9 @@ from .strategies import (
     bytes_for_share,
 )
 
+# Hard-coded in Tahoe-LAFS: the lease duration, 31 days, in seconds.
+LEASE_INTERVAL = 60 * 60 * 24 * 31
+
 def cleanup_storage_server(storage_server):
     """
     Delete all of the shares held by the given storage server.
@@ -73,3 +83,53 @@ def write_toy_shares(
     for (sharenum, writer) in allocated.items():
         writer.remote_write(0, bytes_for_share(sharenum, size))
         writer.remote_close()
+
+
+def whitebox_write_sparse_share(sharepath, version, size, leases, now):
+    """
+    Write a zero-filled sparse (if the filesystem supports it) immutable share
+    to the given path.
+
+    This assumes knowledge of the Tahoe-LAFS share file format.
+
+    :param FilePath sharepath: The path to which to write the share file.
+    :param int version: The share version to write to the file.
+    :param int size: The share data size to write.
+    :param list leases: Renewal secrets for leases to write to the share file.
+    :param float now: The current time as a POSIX timestamp.
+    """
+    # Maybe-saturated size (what at least one Tahoe-LAFS comment claims is
+    # appropriate for large files)
+    internal_size = min(size, 2 ** 32 - 1)
+    apparent_size = size
+
+    header_format = ">LLL"
+    lease_format = ">L32s32sL"
+    with sharepath.open("wb") as share:
+        share.write(
+            pack(
+                header_format,
+                version,
+                internal_size,
+                len(leases),
+            ),
+        )
+        # Try to make it sparse by skipping all the data.
+        share.seek(apparent_size - 1, SEEK_CUR)
+        share.write(b"\0")
+        share.write(
+            b"".join(
+                pack(
+                    lease_format,
+                    # no owner
+                    0,
+                    renew,
+                    # no cancel secret
+                    b"",
+                    # expiration timestamp
+                    int(now + LEASE_INTERVAL),
+                )
+                for renew
+                in leases
+            ),
+        )
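+
+# For example, a test can create a (nominally) 1 GiB version 1 share without
+# actually writing a gibibyte of data:
+#
+#   whitebox_write_sparse_share(
+#       sharepath,           # a FilePath
+#       version=1,
+#       size=2 ** 30,
+#       leases=[b"x" * 32],  # one 32 byte renew secret
+#       now=time(),
+#   )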
diff --git a/src/_zkapauthorizer/tests/strategies.py b/src/_zkapauthorizer/tests/strategies.py
index 13ea72ac349a1b1d04aaf44d45c148260ad7359d..28028fd87ad78348725343f9ac19bf710c6eb040 100644
--- a/src/_zkapauthorizer/tests/strategies.py
+++ b/src/_zkapauthorizer/tests/strategies.py
@@ -533,6 +533,13 @@ def write_enabler_secrets():
     )
 
 
+def share_versions():
+    """
+    Build integers which could be Tahoe-LAFS share file version numbers.
+    """
+    return integers(min_value=0, max_value=2 ** 32 - 1)
+
+
 def sharenums():
     """
     Build Tahoe-LAFS share numbers.
@@ -554,17 +561,19 @@ def sharenum_sets():
     )
 
 
-def sizes():
+def sizes(
+    # Size 0 data isn't data, it's nothing.
+    min_value=1,
+    # Let this be larger than a single segment (2 ** 17) in case that matters
+    # to Tahoe-LAFS storage at all.  I don't think it does, though.
+    max_value=2 ** 18,
+):
     """
     Build Tahoe-LAFS share sizes.
     """
     return integers(
-        # Size 0 data isn't data, it's nothing.
-        min_value=1,
-        # Let this be larger than a single segment (2 ** 17) in case that
-        # matters to Tahoe-LAFS storage at all.  I don't think it does,
-        # though.
-        max_value=2 ** 18,
+        min_value=min_value,
+        max_value=max_value,
     )
 
 
diff --git a/src/_zkapauthorizer/tests/test_client_resource.py b/src/_zkapauthorizer/tests/test_client_resource.py
index 65b0286ed6cf839ddd0b7c78b983036cab98b340..319f64bea658fae129ccfd11b8deab8f4da71547 100644
--- a/src/_zkapauthorizer/tests/test_client_resource.py
+++ b/src/_zkapauthorizer/tests/test_client_resource.py
@@ -135,7 +135,6 @@ from ..resource import (
 )
 
 from ..storage_common import (
-    BYTES_PER_PASS,
     required_passes,
 )
 
@@ -578,7 +577,7 @@ class UnblindedTokenTests(TestCase):
         total = 0
         activity = root.store.start_lease_maintenance()
         for sizes in size_observations:
-            total += required_passes(BYTES_PER_PASS, sizes)
+            total += required_passes(root.store.pass_value, sizes)
             activity.observe(sizes)
         activity.finish()
 
diff --git a/src/_zkapauthorizer/tests/test_controller.py b/src/_zkapauthorizer/tests/test_controller.py
index d133a3d72133cba18f649704b59ea420f17a40d1..3e8d6fce78ef80963977ce3137121c325b34c415 100644
--- a/src/_zkapauthorizer/tests/test_controller.py
+++ b/src/_zkapauthorizer/tests/test_controller.py
@@ -215,6 +215,41 @@ class PaymentControllerTests(TestCase):
     """
     Tests for ``PaymentController``.
     """
+    @given(tahoe_configs(), datetimes(), vouchers())
+    def test_should_not_redeem(self, get_config, now, voucher):
+        """
+        ``PaymentController.redeem`` fails with ``ValueError`` if passed a
+        voucher in a state in which redemption should not be started.
+        """
+        store = self.useFixture(TemporaryVoucherStore(get_config, lambda: now)).store
+        controller = PaymentController(
+            store,
+            DummyRedeemer(),
+            default_token_count=100,
+        )
+
+        self.assertThat(
+            controller.redeem(voucher),
+            succeeded(Always()),
+        )
+
+        # Sanity check.  It should be redeemed now.
+        voucher_obj = controller.get_voucher(voucher)
+        self.assertThat(
+            voucher_obj.state.should_start_redemption(),
+            Equals(False),
+        )
+
+        self.assertThat(
+            controller.redeem(voucher),
+            failed(
+                AfterPreprocessing(
+                    lambda f: f.type,
+                    Equals(ValueError),
+                ),
+            ),
+        )
+
     @given(tahoe_configs(), datetimes(), vouchers())
     def test_not_redeemed_while_redeeming(self, get_config, now, voucher):
         """
diff --git a/src/_zkapauthorizer/tests/test_model.py b/src/_zkapauthorizer/tests/test_model.py
index ca0d3c600ed8bff92593ffb1b08f81d1ffb15e73..1670c542e7253198b4dda8fb9a1ff9e511d21df1 100644
--- a/src/_zkapauthorizer/tests/test_model.py
+++ b/src/_zkapauthorizer/tests/test_model.py
@@ -69,10 +69,6 @@ from twisted.python.runtime import (
     platform,
 )
 
-from ..storage_common import (
-    BYTES_PER_PASS,
-)
-
 from ..model import (
     StoreOpenError,
     NotEnoughTokens,
@@ -409,8 +405,11 @@ class LeaseMaintenanceTests(TestCase):
                     tuples(
                         # The activity itself, in pass count
                         integers(min_value=1, max_value=2 ** 16 - 1),
-                        # Amount by which to trim back the share sizes
-                        integers(min_value=0, max_value=BYTES_PER_PASS - 1),
+                        # Amount by which to trim back the share sizes.  This
+                        # might exceed the value of a single pass but we don't
+                        # know that value yet.  We'll map it into a coherent
+                        # range with mod inside the test.
+                        integers(min_value=0),
                     ),
                 ),
                 # How much time passes before this activity finishes
@@ -436,8 +435,9 @@ class LeaseMaintenanceTests(TestCase):
             passes_required = 0
             for (num_passes, trim_size) in sizes:
                 passes_required += num_passes
+                trim_size %= store.pass_value
                 x.observe([
-                    num_passes * BYTES_PER_PASS - trim_size,
+                    num_passes * store.pass_value - trim_size,
                 ])
             now += finish_delay
             x.finish()
diff --git a/src/_zkapauthorizer/tests/test_plugin.py b/src/_zkapauthorizer/tests/test_plugin.py
index 18dcebc3ddaef76e26fbd04386acda5d66503bbe..ebd714863a3ab95a590826698001ba4cac469965 100644
--- a/src/_zkapauthorizer/tests/test_plugin.py
+++ b/src/_zkapauthorizer/tests/test_plugin.py
@@ -41,7 +41,12 @@ from testtools import (
 from testtools.matchers import (
     Always,
     Contains,
+    Equals,
     AfterPreprocessing,
+    MatchesAll,
+    HasLength,
+    AllMatch,
+    ContainsDict,
 )
 from testtools.twistedsupport import (
     succeeded,
@@ -79,6 +84,10 @@ from allmydata.client import (
     create_client_from_config,
 )
 
+from eliot.testing import (
+    LoggedMessage,
+)
+
 from twisted.python.filepath import (
     FilePath,
 )
@@ -95,6 +104,10 @@ from twisted.plugins.zkapauthorizer import (
     storage_server,
 )
 
+from .._plugin import (
+    GET_PASSES,
+)
+
 from ..foolscap import (
     RIPrivacyPassAuthorizedStorageServer,
 )
@@ -108,8 +121,8 @@ from ..controller import (
     DummyRedeemer,
 )
 from ..storage_common import (
-    BYTES_PER_PASS,
     required_passes,
+    allocate_buckets_message,
 )
 from .._storage_client import (
     IncorrectStorageServerReference,
@@ -143,6 +156,11 @@ from .foolscap import (
     DummyReferenceable,
 )
 
+from .eliot import (
+    capture_logging,
+)
+
+
 
 SIGNING_KEY_PATH = FilePath(__file__).sibling(u"testing-signing.key")
 
@@ -386,18 +404,20 @@ class ClientPluginTests(TestCase):
         )
 
     @given(
-        tahoe_configs_with_dummy_redeemer,
-        datetimes(),
-        announcements(),
-        vouchers(),
-        storage_indexes(),
-        lease_renew_secrets(),
-        lease_cancel_secrets(),
-        sharenum_sets(),
-        sizes(),
+        get_config=tahoe_configs_with_dummy_redeemer,
+        now=datetimes(),
+        announcement=announcements(),
+        voucher=vouchers(),
+        storage_index=storage_indexes(),
+        renew_secret=lease_renew_secrets(),
+        cancel_secret=lease_cancel_secrets(),
+        sharenums=sharenum_sets(),
+        size=sizes(),
     )
+    @capture_logging(lambda self, logger: logger.validate())
     def test_unblinded_tokens_extracted(
             self,
+            logger,
             get_config,
             now,
             announcement,
@@ -418,16 +438,16 @@ class ClientPluginTests(TestCase):
             b"tub.port",
         )
 
+        store = VoucherStore.from_node_config(node_config, lambda: now)
         # Give it enough for the allocate_buckets call below.
-        token_count = required_passes(BYTES_PER_PASS, [size] * len(sharenums))
+        expected_pass_cost = required_passes(store.pass_value, [size] * len(sharenums))
         # And few enough redemption groups given the number of tokens.
-        num_redemption_groups = token_count
+        num_redemption_groups = expected_pass_cost
 
-        store = VoucherStore.from_node_config(node_config, lambda: now)
         controller = PaymentController(
             store,
             DummyRedeemer(),
-            default_token_count=token_count,
+            default_token_count=expected_pass_cost,
             num_redemption_groups=num_redemption_groups,
         )
         # Get a token inserted into the store.
@@ -460,6 +480,23 @@ class ClientPluginTests(TestCase):
             raises(NotEnoughTokens),
         )
 
+        messages = LoggedMessage.of_type(logger.messages, GET_PASSES)
+        self.assertThat(
+            messages,
+            MatchesAll(
+                HasLength(1),
+                AllMatch(
+                    AfterPreprocessing(
+                        lambda logged_message: logged_message.message,
+                        ContainsDict({
+                            u"message": Equals(allocate_buckets_message(storage_index)),
+                            u"count": Equals(expected_pass_cost),
+                        }),
+                    ),
+                ),
+            ),
+        )
+
 
 class ClientResourceTests(TestCase):
     """
diff --git a/src/_zkapauthorizer/tests/test_storage_protocol.py b/src/_zkapauthorizer/tests/test_storage_protocol.py
index 713a0c3e862dddfbf803e409898043f0cd562532..a267c0667f3fe1b3101ab9e5a7a9eb10d8091a32 100644
--- a/src/_zkapauthorizer/tests/test_storage_protocol.py
+++ b/src/_zkapauthorizer/tests/test_storage_protocol.py
@@ -70,6 +70,10 @@ from challenge_bypass_ristretto import (
     random_signing_key,
 )
 
+from allmydata.storage.common import (
+    storage_index_to_dir,
+)
+
 from .common import (
     skipIf,
 )
@@ -82,6 +86,7 @@ from .strategies import (
     lease_renew_secrets,
     lease_cancel_secrets,
     write_enabler_secrets,
+    share_versions,
     sharenums,
     sharenum_sets,
     sizes,
@@ -97,8 +102,10 @@ from .fixtures import (
     AnonymousStorageServer,
 )
 from .storage_common import (
+    LEASE_INTERVAL,
     cleanup_storage_server,
     write_toy_shares,
+    whitebox_write_sparse_share,
 )
 from .foolscap import (
     LocalRemote,
@@ -119,7 +126,6 @@ from ..foolscap import (
     ShareStat,
 )
 
-
 class RequiredPassesTests(TestCase):
     """
     Tests for ``required_passes``.
@@ -167,6 +173,8 @@ class ShareTests(TestCase):
         iteration of the test so far, probably; so make relative comparisons
         instead of absolute ones).
     """
+    pass_value = 128 * 1024
+
     def setUp(self):
         super(ShareTests, self).setUp()
         self.canary = LocalReferenceable(None)
@@ -187,10 +195,12 @@ class ShareTests(TestCase):
             )
         self.server = ZKAPAuthorizerStorageServer(
             self.anonymous_storage_server,
+            self.pass_value,
             self.signing_key,
         )
         self.local_remote_server = LocalRemote(self.server)
         self.client = ZKAPAuthorizerStorageClient(
+            self.pass_value,
             get_rref=lambda: self.local_remote_server,
             get_passes=get_passes,
         )
@@ -349,19 +359,7 @@ class ShareTests(TestCase):
             Equals(int(now + self.server.LEASE_PERIOD.total_seconds())),
         )
 
-    @given(
-        storage_index=storage_indexes(),
-        renew_secret=lease_renew_secrets(),
-        cancel_secret=lease_cancel_secrets(),
-        sharenum=sharenums(),
-        size=sizes(),
-        clock=clocks(),
-    )
-    def test_stat_shares_immutable(self, storage_index, renew_secret, cancel_secret, sharenum, size, clock):
-        """
-        Size and lease information about immutable shares can be retrieved from a
-        storage server.
-        """
+    def _stat_shares_immutable_test(self, storage_index, sharenum, size, clock, leases, write_shares):
         # Hypothesis causes our storage server to be used many times.  Clean
         # up between iterations.
         cleanup_storage_server(self.anonymous_storage_server)
@@ -372,23 +370,27 @@ class ShareTests(TestCase):
         try:
             patch.setUp()
             # Create a share we can toy with.
-            write_toy_shares(
+            write_shares(
                 self.anonymous_storage_server,
                 storage_index,
-                renew_secret,
-                cancel_secret,
                 {sharenum},
                 size,
                 canary=self.canary,
             )
+            # Perhaps put some more leases on it.  Leases might impact our
+            # ability to determine share data size.
+            for renew_secret in leases:
+                self.anonymous_storage_server.remote_add_lease(
+                    storage_index,
+                    renew_secret,
+                    b"",
+                )
         finally:
             patch.cleanUp()
 
         stats = extract_result(
             self.client.stat_shares([storage_index]),
         )
-        # Hard-coded in Tahoe-LAFS
-        LEASE_INTERVAL = 60 * 60 * 24 * 31
         expected = [{
             sharenum: ShareStat(
                 size=size,
@@ -400,6 +402,172 @@ class ShareTests(TestCase):
             Equals(expected),
         )
 
+    @given(
+        storage_index=storage_indexes(),
+        renew_secret=lease_renew_secrets(),
+        cancel_secret=lease_cancel_secrets(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True),
+    )
+    def test_stat_shares_immutable(self, storage_index, renew_secret, cancel_secret, sharenum, size, clock, leases):
+        """
+        Size and lease information about immutable shares can be retrieved from a
+        storage server.
+        """
+        return self._stat_shares_immutable_test(
+            storage_index,
+            sharenum,
+            size,
+            clock,
+            leases,
+            lambda storage_server, storage_index, sharenums, size, canary: write_toy_shares(
+                storage_server,
+                storage_index,
+                renew_secret,
+                cancel_secret,
+                sharenums,
+                size,
+                canary,
+            ),
+        )
+
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True, min_size=1),
+        version=share_versions(),
+    )
+    def test_stat_shares_immutable_wrong_version(self, storage_index, sharenum, size, clock, leases, version):
+        """
+        If a share file with an unexpected version is found, ``stat_shares``
+        declines to offer a result (by raising ``ValueError``).
+        """
+        assume(version != 1)
+
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        sharedir = FilePath(self.anonymous_storage_server.sharedir).preauthChild(
+            # storage_index_to_dir likes to return multiple segments
+            # joined by pathsep
+            storage_index_to_dir(storage_index),
+        )
+        sharepath = sharedir.child(u"{}".format(sharenum))
+        sharepath.parent().makedirs()
+        whitebox_write_sparse_share(
+            sharepath,
+            version=version,
+            size=size,
+            leases=leases,
+            now=clock.seconds(),
+        )
+
+        self.assertThat(
+            self.client.stat_shares([storage_index]),
+            failed(
+                AfterPreprocessing(
+                    lambda f: f.value,
+                    IsInstance(ValueError),
+                ),
+            ),
+        )
+
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        version=share_versions(),
+        # Encode our knowledge of the share header format and size right here...
+        position=integers(min_value=0, max_value=11),
+    )
+    def test_stat_shares_truncated_file(self, storage_index, sharenum, size, clock, version, position):
+        """
+        If a share file is truncated in the middle of the header,
+        ``stat_shares`` declines to offer a result (by raising
+        ``ValueError``).
+        """
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        sharedir = FilePath(self.anonymous_storage_server.sharedir).preauthChild(
+            # storage_index_to_dir likes to return multiple segments
+            # joined by pathsep
+            storage_index_to_dir(storage_index),
+        )
+        sharepath = sharedir.child(u"{}".format(sharenum))
+        sharepath.parent().makedirs()
+        whitebox_write_sparse_share(
+            sharepath,
+            version=version,
+            size=size,
+            # We know leases are at the end, where they'll get chopped off, so
+            # we don't bother to write any.
+            leases=[],
+            now=clock.seconds(),
+        )
+        with sharepath.open("wb") as fobj:
+            fobj.truncate(position)
+
+        self.assertThat(
+            self.client.stat_shares([storage_index]),
+            failed(
+                AfterPreprocessing(
+                    lambda f: f.value,
+                    IsInstance(ValueError),
+                ),
+            ),
+        )
+
+    @skipIf(platform.isWindows(), "Creating large files on Windows (no sparse files) is too slow")
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(min_value=2 ** 18, max_value=2 ** 40),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True, min_size=1),
+    )
+    def test_stat_shares_immutable_large(self, storage_index, sharenum, size, clock, leases):
+        """
+        Size and lease information about very large immutable shares can be
+        retrieved from a storage server.
+
+        This is more of a whitebox test.  It assumes knowledge of Tahoe-LAFS
+        share placement and layout.  This is necessary to avoid having to
+        write real multi-gigabyte files to exercise the behavior.
+        """
+        def write_shares(storage_server, storage_index, sharenums, size, canary):
+            sharedir = FilePath(storage_server.sharedir).preauthChild(
+                # storage_index_to_dir likes to return multiple segments
+                # joined by pathsep
+                storage_index_to_dir(storage_index),
+            )
+            for sharenum in sharenums:
+                sharepath = sharedir.child(u"{}".format(sharenum))
+                sharepath.parent().makedirs()
+                whitebox_write_sparse_share(
+                    sharepath,
+                    version=1,
+                    size=size,
+                    leases=leases,
+                    now=clock.seconds(),
+                )
+
+        return self._stat_shares_immutable_test(
+            storage_index,
+            sharenum,
+            size,
+            clock,
+            leases,
+            write_shares,
+        )
 
     @skipIf(platform.isWindows(), "Storage server miscomputes slot size on Windows")
     @given(
@@ -450,8 +618,6 @@ class ShareTests(TestCase):
         stats = extract_result(
             self.client.stat_shares([storage_index]),
         )
-        # Hard-coded in Tahoe-LAFS
-        LEASE_INTERVAL = 60 * 60 * 24 * 31
         expected = [{
             sharenum: ShareStat(
                 size=get_implied_data_length(
diff --git a/src/_zkapauthorizer/tests/test_storage_server.py b/src/_zkapauthorizer/tests/test_storage_server.py
index 55f4402da118c6eac3bf7717a6a690c742c3d835..88ae5a1f1294bc0679787942f7432aa7e08d2291 100644
--- a/src/_zkapauthorizer/tests/test_storage_server.py
+++ b/src/_zkapauthorizer/tests/test_storage_server.py
@@ -92,7 +92,6 @@ from ..api import (
     MorePassesRequired,
 )
 from ..storage_common import (
-    BYTES_PER_PASS,
     required_passes,
     allocate_buckets_message,
     add_lease_message,
@@ -107,6 +106,8 @@ class PassValidationTests(TestCase):
     """
     Tests for pass validation performed by ``ZKAPAuthorizerStorageServer``.
     """
+    pass_value = 128 * 1024
+
     @skipIf(platform.isWindows(), "Storage server is not supported on Windows")
     def setUp(self):
         super(PassValidationTests, self).setUp()
@@ -119,6 +120,7 @@ class PassValidationTests(TestCase):
         self.signing_key = random_signing_key()
         self.storage_server = ZKAPAuthorizerStorageServer(
             self.anonymous_storage_server,
+            self.pass_value,
             self.signing_key,
             self.clock,
         )
@@ -162,7 +164,7 @@ class PassValidationTests(TestCase):
 
         required_passes = 2
         share_nums = {3, 7}
-        allocated_size = int((required_passes * BYTES_PER_PASS) / len(share_nums))
+        allocated_size = int((required_passes * self.pass_value) / len(share_nums))
         storage_index = b"0123456789"
         renew_secret = b"x" * 32
         cancel_secret = b"y" * 32
@@ -250,7 +252,7 @@ class PassValidationTests(TestCase):
         :param make_data_vector: A one-argument callable.  It will be called
             with the current length of a slot share.  It should return a write
             vector which will increase the storage requirements of that slot
-            share by at least BYTES_PER_PASS.
+            share by at least ``self.pass_value``.
         """
         # hypothesis causes our storage server to be used many times.  Clean
         # up between iterations.
@@ -266,6 +268,7 @@ class PassValidationTests(TestCase):
 
         # print("test suite")
         required_pass_count = get_required_new_passes_for_mutable_write(
+            self.pass_value,
             dict.fromkeys(tw_vectors.keys(), 0),
             tw_vectors,
         )
@@ -355,7 +358,7 @@ class PassValidationTests(TestCase):
             test_and_write_vectors_for_shares,
             lambda current_length: (
                 [],
-                [(current_length, "x" * BYTES_PER_PASS)],
+                [(current_length, "x" * self.pass_value)],
                 None,
             ),
         )
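The write vector above extends a slot share by exactly self.pass_value bytes, so that precisely one additional pass should be required. A sketch of the pass arithmetic these tests assume; this mirrors the apparent contract of required_passes (one pass pays for pass_value bytes and a remainder costs a whole extra pass), not the library's actual implementation:

    # Hypothetical sketch of the contract assumed by these tests, not the
    # real required_passes: ceiling division over the total stored bytes.
    def sketch_required_passes(pass_value, share_sizes):
        total = sum(share_sizes)
        return (total + pass_value - 1) // pass_value

    # With the 128 KiB pass_value used by PassValidationTests:
    assert sketch_required_passes(128 * 1024, [128 * 1024] * 2) == 2
    assert sketch_required_passes(128 * 1024, [128 * 1024 + 1]) == 2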
@@ -387,7 +390,7 @@ class PassValidationTests(TestCase):
 
         renew_secret, cancel_secret = secrets
 
-        required_count = required_passes(BYTES_PER_PASS, [allocated_size] * len(sharenums))
+        required_count = required_passes(self.pass_value, [allocated_size] * len(sharenums))
         # Create some shares at a slot which will require lease renewal.
         write_toy_shares(
             self.anonymous_storage_server,
@@ -524,6 +527,7 @@ class PassValidationTests(TestCase):
 
         # Create an initial share to toy with.
         required_pass_count = get_required_new_passes_for_mutable_write(
+            self.pass_value,
             dict.fromkeys(tw_vectors.keys(), 0),
             tw_vectors,
         )
diff --git a/src/_zkapauthorizer/validators.py b/src/_zkapauthorizer/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd1545144b3a9ed39d10c656ccd9ebbbde549804
--- /dev/null
+++ b/src/_zkapauthorizer/validators.py
@@ -0,0 +1,60 @@
+# Copyright 2019 PrivateStorage.io, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module implements validators for ``attrs``-defined attributes.
+"""
+
+from base64 import (
+    b64decode,
+)
+
+def is_base64_encoded(b64decode=b64decode):
+    """
+    Return an attrs validator which checks that an attribute value is a
+    base64-encoded text string.
+    """
+    def validate_is_base64_encoded(inst, attr, value):
+        try:
+            b64decode(value.encode("ascii"))
+        except TypeError:
+            raise TypeError(
+                "{name!r} must be base64 encoded unicode (got {value!r})".format(
+                    name=attr.name,
+                    value=value,
+                ),
+            )
+    return validate_is_base64_encoded
+
+def has_length(expected):
+    """
+    Return an attrs validator which checks that an attribute value has
+    exactly the ``expected`` length.
+    """
+    def validate_has_length(inst, attr, value):
+        if len(value) != expected:
+            raise ValueError(
+                "{name!r} must have length {expected}, instead has length {actual}".format(
+                    name=attr.name,
+                    expected=expected,
+                    actual=len(value),
+                ),
+            )
+    return validate_has_length
+
+def greater_than(expected):
+    """
+    Return an attrs validator which checks that an attribute value is
+    strictly greater than ``expected``.
+    """
+    def validate_relation(inst, attr, value):
+        if value <= expected:
+            raise ValueError(
+                "{name!r} must be greater than {expected}, instead it was {actual}".format(
+                    name=attr.name,
+                    expected=expected,
+                    actual=value,
+                ),
+            )
+    return validate_relation
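These factories follow the attrs validator protocol: each returns a callable of (inst, attr, value) which raises on invalid input and otherwise returns None. A hypothetical usage sketch; the class and its particular constraints are illustrative only, not taken from this codebase:

    import attr

    from _zkapauthorizer.validators import (
        greater_than,
        has_length,
        is_base64_encoded,
    )

    @attr.s
    class Example(object):
        # Must decode as base64 text.
        blob = attr.ib(validator=is_base64_encoded())
        # Must be exactly 32 characters long.
        secret = attr.ib(validator=has_length(32))
        # Must be strictly greater than zero.
        count = attr.ib(validator=greater_than(0))

    Example(blob=u"aGVsbG8=", secret=u"x" * 32, count=1)  # validates cleanly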
diff --git a/tahoe-lafs-repo.nix b/tahoe-lafs-repo.nix
new file mode 100644
index 0000000000000000000000000000000000000000..fc944e5739e7246a922761b2c7abaf05b62b8327
--- /dev/null
+++ b/tahoe-lafs-repo.nix
@@ -0,0 +1,9 @@
+let
+  pkgs = import <nixpkgs> {};
+in
+  pkgs.fetchFromGitHub {
+    owner = "tahoe-lafs";
+    repo = "tahoe-lafs";
+    rev = "tahoe-lafs-1.14.0";
+    sha256 = "1ahdiapg57g6icv7p2wbzgkwl9lzdlgrsvbm5485414m7z2d6las";
+  }
diff --git a/tahoe-lafs.nix b/tahoe-lafs.nix
index c305267b8914012921a9896a2e42ef2560f42a38..212439638fb54e1dfb57d5ae91784759e234d02e 100644
--- a/tahoe-lafs.nix
+++ b/tahoe-lafs.nix
@@ -1,9 +1,20 @@
+{ python2Packages }:
 let
-  pkgs = import <nixpkgs> {};
+  # Manually assemble the tahoe-lafs build inputs because the tahoe-lafs
+  # 1.14.0 eliot package runs the eliot test suite, which is flaky.  Doing
+  # this gives us a place to insert a `doInstallCheck = false` (at the cost
+  # of essentially duplicating tahoe-lafs' default.nix).  Not ideal, but at
+  # least we can throw it away when we upgrade to the next tahoe-lafs
+  # version.
+  repo = (import ./tahoe-lafs-repo.nix) + "/nix";
+  nevow-drv = repo + "/nevow.nix";
+  nevow = python2Packages.callPackage nevow-drv { };
+  eliot-drv = repo + "/eliot.nix";
+  eliot = (python2Packages.callPackage eliot-drv { }).overrideAttrs (old: {
+    doInstallCheck = false;
+  });
+  tahoe-lafs-drv = repo + "/tahoe-lafs.nix";
+  tahoe-lafs = python2Packages.callPackage tahoe-lafs-drv {
+    inherit nevow eliot;
+  };
 in
-  pkgs.fetchFromGitHub {
-    owner = "tahoe-lafs";
-    repo = "tahoe-lafs";
-    rev = "34aeefd3ddbf28dafbc3477e52461eafa53b545d";
-    sha256 = "0l8n4njbzgiwmn3qsmvzyzqlb0y9bj9g2jvpdynvsn1ggxrqmvsq";
-  }
\ No newline at end of file
+  tahoe-lafs
diff --git a/zkapauthorizer.nix b/zkapauthorizer.nix
index 5b113d7357028fd3e38017eaf6cf46eb60cee3df..89639e0d41ec84db5811503dd150f9c33250d0a3 100644
--- a/zkapauthorizer.nix
+++ b/zkapauthorizer.nix
@@ -27,10 +27,18 @@ buildPythonPackage rec {
     sphinx
   ];
 
+  patches = [
+    # Remove the Tahoe-LAFS version pin in distutils config.  We have our own
+    # pinning and also our Tahoe-LAFS package has a bogus version number. :/
+    ./nix/setup.cfg.patch
+  ];
+
   propagatedBuildInputs = [
     attrs
     zope_interface
     aniso8601
+    # Inherit eliot from tahoe-lafs
+    # eliot
     twisted
     tahoe-lafs
     challenge-bypass-ristretto
@@ -48,7 +56,7 @@ buildPythonPackage rec {
     runHook preCheck
     "${pyflakes}/bin/pyflakes" src/_zkapauthorizer
     ZKAPAUTHORIZER_HYPOTHESIS_PROFILE=${hypothesisProfile'} python -m ${if collectCoverage
-      then "coverage run --branch --source _zkapauthorizer,twisted.plugins.zkapauthorizer --module"
+      then "coverage run --debug=config --module"
       else ""
     } twisted.trial ${extraTrialArgs} ${testSuite'}
     runHook postCheck
@@ -56,10 +64,8 @@ buildPythonPackage rec {
 
   postCheck = if collectCoverage
     then ''
-    python -m coverage html
     mkdir -p "$doc/share/doc/${name}"
-    cp -vr .coverage htmlcov "$doc/share/doc/${name}"
-    python -m coverage report
+    cp -v .coverage.* "$doc/share/doc/${name}"
     ''
     else "";
 }
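With parallel = True in .coveragerc, each run now leaves a suffixed .coverage.* data file, and the check phase above only copies those files into $doc; combining and reporting happen in a later step. A sketch of that step using coverage.py's Python API, equivalent to running "coverage combine" and "coverage xml" on the command line:

    import coverage

    cov = coverage.Coverage()  # reads .coveragerc, including parallel = True
    cov.combine()              # merge the .coverage.* data files
    cov.save()                 # write the combined .coverage file
    cov.xml_report(outfile="coverage.xml")  # XML suitable for upload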