diff --git a/.circleci/config.yml b/.circleci/config.yml
index 7949f83138c5727bec74d23e6f8c372cc94184f4..d33849e40ed5093b194e471b335020955ffee8b6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -106,8 +106,8 @@ jobs:
             # If nixpkgs changes then potentially a lot of cached packages for
             # the base system will be invalidated so we may as well drop them
             # and make a new cache with the new packages.
-            - zkapauthorizer-nix-store-v3-{{ checksum "nixpkgs.rev" }}
-            - zkapauthorizer-nix-store-v3-
+            - zkapauthorizer-nix-store-v4-{{ checksum "nixpkgs.rev" }}
+            - zkapauthorizer-nix-store-v4-

       - run:
           name: "Run Test Suite"
@@ -122,7 +122,10 @@ jobs:
             #
             # Further, we want the "doc" output built as well because that's
             # where the coverage data ends up.
-            nix-build --argstr hypothesisProfile ci --arg collectCoverage true --attr doc
+            #
+            # Also limit the number of concurrent jobs because of resource
+            # constraints on CircleCI. :/
+            nix-build --cores 1 --max-jobs 1 --argstr hypothesisProfile ci --arg collectCoverage true --attr doc

       - run:
           name: "Cache codecov"
@@ -136,15 +139,14 @@ jobs:

       - save_cache:
           name: "Cache Nix Store Paths"
-          key: zkapauthorizer-nix-store-v3-{{ checksum "nixpkgs.rev" }}
+          key: zkapauthorizer-nix-store-v4-{{ checksum "nixpkgs.rev" }}
           paths:
             - "/nix"

       - run:
           name: "Report Coverage"
           command: |
-            nix-shell -p 'python.withPackages (ps: [ ps.codecov ])' --run \
-              'codecov --file ./result-doc/share/doc/*/.coverage'
+            ./.circleci/report-coverage.sh

 workflows:
   version: 2
diff --git a/.circleci/report-coverage.sh b/.circleci/report-coverage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3f15868363d5f8b3c1db8f1b6d591e5862efe971
--- /dev/null
+++ b/.circleci/report-coverage.sh
@@ -0,0 +1,9 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p "python.withPackages (ps: [ ps.codecov ])"
+set -x
+find ./result-doc/share/doc
+cp ./result-doc/share/doc/*/.coverage.* ./
+python -m coverage combine
+python -m coverage report
+python -m coverage xml
+codecov --file coverage.xml
diff --git a/.coveragerc b/.coveragerc
index ec19cb88f397c327bac1afa6a195073f305e3e3d..4d27672886ff83d00efb47c63e2aa77e3819d1c3 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -3,4 +3,33 @@ source =
     _zkapauthorizer
     twisted.plugins.zkapauthorizer

+# Measuring branch coverage is slower (so the conventional wisdom goes) but
+# too bad: it's an important part of the coverage information.
 branch = True
+
+# Whether or not we actually collect coverage information in parallel, we need
+# to have the coverage data files written according to the "parallel" naming
+# scheme so that we can use "coverage combine" later to rewrite paths in the
+# coverage report.
+parallel = True
+
+omit =
+# The Versioneer version file in the repository is generated by
+# Versioneer.  Let's call it Versioneer's responsibility to ensure it
+# works and not pay attention to our test suite's coverage of it.  Also,
+# the way Versioneer works is that the source file in the repository is
+# different from the source file in an installation - which is where we
+# measure coverage.  When the source files differ like this, it's very
+# difficult to produce a coherent coverage report (measurements against
+# one source file are meaningless when looking at a different source
+# file).
+    */_zkapauthorizer/_version.py
+
+[paths]
+source =
+# It looks like this in the checkout
+    src/
+# It looks like this in the Nix build environment
+    /nix/store/*/lib/python*/site-packages/
+# It looks like this in the Windows build environment
+    C:\hostedtoolcache\windows\Python\2.7.18\x64\Lib\site-packages\
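A note on the coverage plumbing above: with "parallel = True" each test run
writes a distinct ".coverage.<machine>.<pid>.<random>" data file, and
"coverage combine" both merges those files and applies the [paths] aliases,
so measurements recorded against /nix/store/.../site-packages/ are reported
against src/.  A minimal sketch of the same steps through coverage.py's
Python API (the file names are illustrative, not taken from the patch):

    from coverage import Coverage

    cov = Coverage()    # picks up .coveragerc, including [paths] and parallel
    cov.combine()       # merge .coverage.* files, remapping store paths to src/
    cov.save()
    cov.report()        # equivalent to `python -m coverage report`
    cov.xml_report(outfile="coverage.xml")   # the file handed to codecov
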
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index b64117daf8257230e49ce1c955088a44707702b1..c8995972397b0b4eee5e00b527d99897cf39050c 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -37,23 +37,19 @@ jobs:

       - name: "Upgrade Pip"
         run: |
-          python -m pip install --upgrade pip
+          python -m pip install -v --upgrade pip

       - name: "Install CI Dependencies"
         run: |
-          python -m pip install wheel coverage
-
-      - name: "Install Tahoe-LAFS master@HEAD"
-        run: |
-          python -m pip install git+https://github.com/tahoe-lafs/tahoe-lafs@master#egg=tahoe-lafs
+          python -m pip install -v wheel coverage

       - name: "Install Test Dependencies"
         run: |
-          python -m pip install -r test-requirements.txt
+          python -m pip install -v -r test-requirements.txt

       - name: "Install ZKAPAuthorizer"
         run: |
-          python -m pip install ./
+          python -m pip install -v ./

       - name: "Dump Python Environment"
         run: |
@@ -63,11 +59,17 @@ jobs:
         env:
           MAGIC_FOLDER_HYPOTHESIS_PROFILE: "ci"
         run: |
-          python -m coverage run -m twisted.trial _zkapauthorizer
-
+          python -m coverage run --debug=config -m twisted.trial _zkapauthorizer

       - name: "Convert Coverage"
         run: |
+          echo "Files:"
+          dir
+          echo "Combining"
+          coverage combine
+          echo "Reporting"
+          coverage report
+          echo "Converting to XML"
           coverage xml

       - uses: codecov/codecov-action@v1
diff --git a/eliot.nix b/eliot.nix
deleted file mode 100644
index f6d6b3061b1ea635bac0e694be407ca8d1b6befb..0000000000000000000000000000000000000000
--- a/eliot.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-{ lib, buildPythonPackage, fetchPypi, zope_interface, pyrsistent, boltons
-, hypothesis, testtools, pytest }:
-buildPythonPackage rec {
-  pname = "eliot";
-  version = "1.7.0";
-
-  src = fetchPypi {
-    inherit pname version;
-    sha256 = "0ylyycf717s5qsrx8b9n6m38vyj2k8328lfhn8y6r31824991wv8";
-  };
-
-  postPatch = ''
-    substituteInPlace setup.py \
-      --replace "boltons >= 19.0.1" boltons
-    # depends on eliot.prettyprint._main which we don't have here.
-    rm eliot/tests/test_prettyprint.py
-  '';
-
-  checkInputs = [ testtools pytest hypothesis ];
-  propagatedBuildInputs = [ zope_interface pyrsistent boltons ];
-
-  meta = with lib; {
-    homepage = https://github.com/itamarst/eliot/;
-    description = "Logging library that tells you why it happened";
-    license = licenses.asl20;
-  };
-}
diff --git a/nix/setup.cfg.patch b/nix/setup.cfg.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c2cdeea1617d10691e9b151d1b3b1308c451952f
--- /dev/null
+++ b/nix/setup.cfg.patch
@@ -0,0 +1,13 @@
+diff --git a/setup.cfg b/setup.cfg
+index dfc49607..822ea8dd 100644
+--- a/setup.cfg
++++ b/setup.cfg
+@@ -40,7 +40,7 @@ install_requires =
+     # incompatible with Tahoe-LAFS'. So duplicate them here (the ones that
+     # have been observed to cause problems).
+     Twisted[tls,conch]>=18.4.0
+-    tahoe-lafs==1.14.0
++    tahoe-lafs
+     treq
+ 
+ [versioneer]
diff --git a/overlays.nix b/overlays.nix
index ed2d8d1af449fcc410a0182b9d26e5b2a868e858..3d548fade895e7a390596fd56861d37ed753c5dc 100644
--- a/overlays.nix
+++ b/overlays.nix
@@ -15,12 +15,9 @@ self: super: {
       # 2 support.
       typing = python-self.callPackage ./typing.nix { };

-      # new tahoe-lafs dependency
-      eliot = python-self.callPackage ./eliot.nix { };
-
       # tahoe-lafs in nixpkgs is packaged as an application! so we have to
       # re-package it ourselves as a library.
-      tahoe-lafs = python-self.callPackage ((import ./tahoe-lafs.nix) + "/nix") { };
+      tahoe-lafs = python-self.callPackage ./tahoe-lafs.nix { };

       # we depend on the challenge-bypass-ristretto python package, a set of
       # bindings to the challenge-bypass-ristretto Rust crate.
diff --git a/setup.cfg b/setup.cfg
index 371a19b343c204fb576b2c6d8c77d969160d40e3..dfc4960747ba2d57bea9b12deff72f59371cd743 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -36,9 +36,11 @@ install_requires =
     eliot
     aniso8601
     python-challenge-bypass-ristretto
-    # Inherit our Twisted dependency from tahoe-lafs so we don't accidentally
-    # get the extras wrong here and break stuff.
-    tahoe-lafs
+    # The pip resolver sometimes finds treq's dependencies first and these are
+    # incompatible with Tahoe-LAFS'.  So duplicate them here (the ones that
+    # have been observed to cause problems).
+    Twisted[tls,conch]>=18.4.0
+    tahoe-lafs==1.14.0
     treq

 [versioneer]
diff --git a/src/_zkapauthorizer/_storage_server.py b/src/_zkapauthorizer/_storage_server.py
index a2d4b9f2c5cce038eff98184ae4859cbde8f8b81..7aa17c840a705ffb15a656a8f85befda42836781 100644
--- a/src/_zkapauthorizer/_storage_server.py
+++ b/src/_zkapauthorizer/_storage_server.py
@@ -42,6 +42,7 @@ from os.path import (
 )
 from os import (
     listdir,
+    stat,
 )
 from datetime import (
     timedelta,
@@ -517,23 +518,50 @@ def get_storage_index_share_size(sharepath):

     :return int: The data size of the share in bytes.
     """
-    # Note Tahoe-LAFS immutable/layout.py makes some claims about how the
-    # share data is structured.  A lot of this seems to be wrong.
-    # storage/immutable.py appears to have the correct information.
-    fmt = ">LL"
+    # From src/allmydata/storage/immutable.py
+    #
+    # The share file has the following layout:
+    #  0x00: share file version number, four bytes, current version is 1
+    #  0x04: share data length, four bytes big-endian = A  # See Footnote 1 below.
+    #  0x08: number of leases, four bytes big-endian
+    #  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
+    #  A+0x0c = B: first lease.  Lease format is:
+    #   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
+    #   B+0x04: renew secret, 32 bytes (SHA256)
+    #   B+0x24: cancel secret, 32 bytes (SHA256)
+    #   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
+    #   B+0x48: next lease, or end of record
+    #
+    # Footnote 1: as of Tahoe v1.3.0 this field is not used by storage
+    # servers, but it is still filled in by storage servers in case the
+    # storage server software gets downgraded from >= Tahoe v1.3.0 to < Tahoe
+    # v1.3.0, or the share file is moved from one storage server to
+    # another.  The value stored in this field is truncated, so if the actual
+    # share data length is >= 2**32, then the value stored in this field will
+    # be the actual share data length modulo 2**32.
+
+    share_file_size = stat(sharepath).st_size
+    header_format = ">LLL"
+    header_size = calcsize(header_format)
     with open(sharepath, "rb") as share_file:
-        header = share_file.read(calcsize(fmt))
+        header = share_file.read(calcsize(header_format))

-    if len(header) != calcsize(fmt):
+    if len(header) != header_size:
         raise ValueError(
             "Tried to read {} bytes of share file header, got {!r} instead.".format(
-                calcsize(fmt),
+                calcsize(header_format),
                 header,
             ),
         )

-    version, share_data_length = unpack(fmt, header)
-    return share_data_length
+    version, _, number_of_leases = unpack(header_format, header)
+
+    if version != 1:
+        raise ValueError(
+            "Cannot interpret version {} share file.".format(version),
+        )
+
+    return share_file_size - header_size - (number_of_leases * (4 + 32 + 32 + 4))


 def get_lease_expiration(get_leases, storage_index_or_slot):
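The rewritten get_storage_index_share_size stops trusting the header's
share-data-length field (which, per the footnote, is truncated to 32 bits)
and instead derives the data size from the file size minus the fixed-size
header and leases.  A worked example of that arithmetic, using the field
widths from the layout comment (the 8 GiB figure is illustrative):

    from struct import calcsize

    header_size = calcsize(">LLL")   # 12 bytes: version, data length, lease count
    lease_size = 4 + 32 + 32 + 4     # 72 bytes: owner, renew, cancel, expiration

    share_data_length = 2 ** 33     # an 8 GiB share
    number_of_leases = 2
    share_file_size = header_size + share_data_length + number_of_leases * lease_size

    # The header field would store 2 ** 33 % 2 ** 32 == 0, which is useless,
    # but the file-size computation recovers the real length:
    assert share_file_size - header_size - number_of_leases * lease_size == 2 ** 33
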
diff --git a/src/_zkapauthorizer/tests/storage_common.py b/src/_zkapauthorizer/tests/storage_common.py
index 4baf4de7b89ff6ab3c0ea7145b5d0fa347f3a8e9..d00a580c29adf51f1d39583012fbe09b11555678 100644
--- a/src/_zkapauthorizer/tests/storage_common.py
+++ b/src/_zkapauthorizer/tests/storage_common.py
@@ -16,6 +16,13 @@
 ``allmydata.storage``-related helpers shared across the test suite.
 """

+from os import (
+    SEEK_CUR,
+)
+from struct import (
+    pack,
+)
+
 from twisted.python.filepath import (
     FilePath,
 )
@@ -25,6 +32,9 @@ from .strategies import (
     bytes_for_share,
 )

+# Hard-coded in Tahoe-LAFS
+LEASE_INTERVAL = 60 * 60 * 24 * 31
+
 def cleanup_storage_server(storage_server):
     """
     Delete all of the shares held by the given storage server.
@@ -73,3 +83,53 @@ def write_toy_shares(
     for (sharenum, writer) in allocated.items():
         writer.remote_write(0, bytes_for_share(sharenum, size))
         writer.remote_close()
+
+
+def whitebox_write_sparse_share(sharepath, version, size, leases, now):
+    """
+    Write a zero-filled sparse (if the filesystem supports it) immutable share
+    to the given path.
+
+    This assumes knowledge of the Tahoe-LAFS share file format.
+
+    :param FilePath sharepath: The path to which to write the share file.
+    :param int version: The share version to write to the file.
+    :param int size: The share data size to write.
+    :param list leases: Renewal secrets for leases to write to the share file.
+    :param float now: The current time as a POSIX timestamp.
+    """
+    # Maybe-saturated size (what at least one Tahoe-LAFS comment claims is
+    # appropriate for large files)
+    internal_size = min(size, 2 ** 32 - 1)
+    apparent_size = size
+
+    header_format = ">LLL"
+    lease_format = ">L32s32sL"
+    with sharepath.open("wb") as share:
+        share.write(
+            pack(
+                header_format,
+                version,
+                internal_size,
+                len(leases),
+            ),
+        )
+        # Try to make it sparse by skipping all the data.
+        share.seek(apparent_size - 1, SEEK_CUR),
+        share.write(b"\0")
+        share.write(
+            b"".join(
+                pack(
+                    lease_format,
+                    # no owner
+                    0,
+                    renew,
+                    # no cancel secret
+                    b"",
+                    # expiration timestamp
+                    int(now + LEASE_INTERVAL),
+                )
+                for renew
+                in leases
+            ),
+        )
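As a sketch of how the helper above can be used, this writes a nominally
1 GiB share almost for free (the path and the 32-byte renew secret are made
up for illustration; they are not from the patch):

    from time import time
    from twisted.python.filepath import FilePath

    sharepath = FilePath("/tmp/example-shares/0")   # hypothetical location
    sharepath.parent().makedirs()
    whitebox_write_sparse_share(
        sharepath,
        version=1,
        size=2 ** 30,          # 1 GiB of apparent share data
        leases=[b"x" * 32],    # one lease with a dummy renew secret
        now=time(),
    )
    # The apparent file size is the 12 byte header + 2 ** 30 zero bytes + one
    # 72 byte lease, but on a sparse-file-capable filesystem it occupies only
    # a few kilobytes of actual disk.
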
diff --git a/src/_zkapauthorizer/tests/strategies.py b/src/_zkapauthorizer/tests/strategies.py
index 3baf11475b7daa296d8ef621ba493175238a3a45..a438fa27ae10bd937f6e30421297da297fc753f6 100644
--- a/src/_zkapauthorizer/tests/strategies.py
+++ b/src/_zkapauthorizer/tests/strategies.py
@@ -524,6 +524,13 @@ def write_enabler_secrets():
     )


+def share_versions():
+    """
+    Build integers which could be Tahoe-LAFS share file version numbers.
+    """
+    return integers(min_value=0, max_value=2 ** 32 - 1)
+
+
 def sharenums():
     """
     Build Tahoe-LAFS share numbers.
@@ -545,17 +552,19 @@ def sharenum_sets():
     )


-def sizes():
+def sizes(
+        # Size 0 data isn't data, it's nothing.
+        min_value=1,
+        # Let this be larger than a single segment (2 ** 17) in case that matters
+        # to Tahoe-LAFS storage at all.  I don't think it does, though.
+        max_value=2 ** 18,
+):
     """
     Build Tahoe-LAFS share sizes.
     """
     return integers(
-        # Size 0 data isn't data, it's nothing.
-        min_value=1,
-        # Let this be larger than a single segment (2 ** 17) in case that
-        # matters to Tahoe-LAFS storage at all.  I don't think it does,
-        # though.
-        max_value=2 ** 18,
+        min_value=min_value,
+        max_value=max_value,
     )
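The parameterization of sizes() exists so the large-share test added below
can request multi-gigabyte sizes while every other caller keeps the old
defaults.  A sketch of the intended use (the test bodies are illustrative,
assuming sizes is imported from the strategies module):

    from hypothesis import given
    from _zkapauthorizer.tests.strategies import sizes

    @given(size=sizes())                                      # 1 .. 2 ** 18, as before
    def test_typical(size):
        assert 1 <= size <= 2 ** 18

    @given(size=sizes(min_value=2 ** 18, max_value=2 ** 40))  # up to a terabyte
    def test_large(size):
        assert 2 ** 18 <= size <= 2 ** 40
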
diff --git a/src/_zkapauthorizer/tests/test_storage_protocol.py b/src/_zkapauthorizer/tests/test_storage_protocol.py
index f2b9b6895246583018d498422e84ad2265e4eb4c..a267c0667f3fe1b3101ab9e5a7a9eb10d8091a32 100644
--- a/src/_zkapauthorizer/tests/test_storage_protocol.py
+++ b/src/_zkapauthorizer/tests/test_storage_protocol.py
@@ -70,6 +70,10 @@ from challenge_bypass_ristretto import (
     random_signing_key,
 )

+from allmydata.storage.common import (
+    storage_index_to_dir,
+)
+
 from .common import (
     skipIf,
 )
@@ -82,6 +86,7 @@ from .strategies import (
     lease_renew_secrets,
     lease_cancel_secrets,
     write_enabler_secrets,
+    share_versions,
     sharenums,
     sharenum_sets,
     sizes,
@@ -97,8 +102,10 @@ from .fixtures import (
     AnonymousStorageServer,
 )
 from .storage_common import (
+    LEASE_INTERVAL,
     cleanup_storage_server,
     write_toy_shares,
+    whitebox_write_sparse_share,
 )
 from .foolscap import (
     LocalRemote,
@@ -119,7 +126,6 @@ from ..foolscap import (
     ShareStat,
 )

-
 class RequiredPassesTests(TestCase):
     """
     Tests for ``required_passes``.
@@ -353,19 +359,7 @@ class ShareTests(TestCase):
             Equals(int(now + self.server.LEASE_PERIOD.total_seconds())),
         )

-    @given(
-        storage_index=storage_indexes(),
-        renew_secret=lease_renew_secrets(),
-        cancel_secret=lease_cancel_secrets(),
-        sharenum=sharenums(),
-        size=sizes(),
-        clock=clocks(),
-    )
-    def test_stat_shares_immutable(self, storage_index, renew_secret, cancel_secret, sharenum, size, clock):
-        """
-        Size and lease information about immutable shares can be retrieved from a
-        storage server.
-        """
+    def _stat_shares_immutable_test(self, storage_index, sharenum, size, clock, leases, write_shares):
         # Hypothesis causes our storage server to be used many times.  Clean
         # up between iterations.
         cleanup_storage_server(self.anonymous_storage_server)
@@ -376,23 +370,27 @@ class ShareTests(TestCase):
         try:
             patch.setUp()
             # Create a share we can toy with.
-            write_toy_shares(
+            write_shares(
                 self.anonymous_storage_server,
                 storage_index,
-                renew_secret,
-                cancel_secret,
                 {sharenum},
                 size,
                 canary=self.canary,
             )
+            # Perhaps put some more leases on it.  Leases might impact our
+            # ability to determine share data size.
+            for renew_secret in leases:
+                self.anonymous_storage_server.remote_add_lease(
+                    storage_index,
+                    renew_secret,
+                    b"",
+                )
         finally:
             patch.cleanUp()

         stats = extract_result(
             self.client.stat_shares([storage_index]),
         )
-        # Hard-coded in Tahoe-LAFS
-        LEASE_INTERVAL = 60 * 60 * 24 * 31
         expected = [{
             sharenum: ShareStat(
                 size=size,
@@ -404,6 +402,172 @@ class ShareTests(TestCase):
             Equals(expected),
         )

+    @given(
+        storage_index=storage_indexes(),
+        renew_secret=lease_renew_secrets(),
+        cancel_secret=lease_cancel_secrets(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True),
+    )
+    def test_stat_shares_immutable(self, storage_index, renew_secret, cancel_secret, sharenum, size, clock, leases):
+        """
+        Size and lease information about immutable shares can be retrieved from a
+        storage server.
+        """
+        return self._stat_shares_immutable_test(
+            storage_index,
+            sharenum,
+            size,
+            clock,
+            leases,
+            lambda storage_server, storage_index, sharenums, size, canary: write_toy_shares(
+                storage_server,
+                storage_index,
+                renew_secret,
+                cancel_secret,
+                sharenums,
+                size,
+                canary,
+            ),
+        )
+
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True, min_size=1),
+        version=share_versions(),
+    )
+    def test_stat_shares_immutable_wrong_version(self, storage_index, sharenum, size, clock, leases, version):
+        """
+        If a share file with an unexpected version is found, ``stat_shares``
+        declines to offer a result (by raising ``ValueError``).
+        """
+        assume(version != 1)
+
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        sharedir = FilePath(self.anonymous_storage_server.sharedir).preauthChild(
+            # storage_index_to_dir likes to return multiple segments
+            # joined by pathsep
+            storage_index_to_dir(storage_index),
+        )
+        sharepath = sharedir.child(u"{}".format(sharenum))
+        sharepath.parent().makedirs()
+        whitebox_write_sparse_share(
+            sharepath,
+            version=version,
+            size=size,
+            leases=leases,
+            now=clock.seconds(),
+        )
+
+        self.assertThat(
+            self.client.stat_shares([storage_index]),
+            failed(
+                AfterPreprocessing(
+                    lambda f: f.value,
+                    IsInstance(ValueError),
+                ),
+            ),
+        )
+
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+        version=share_versions(),
+        # Encode our knowledge of the share header format and size right here...
+        position=integers(min_value=0, max_value=11),
+    )
+    def test_stat_shares_truncated_file(self, storage_index, sharenum, size, clock, version, position):
+        """
+        If a share file is truncated in the middle of the header,
+        ``stat_shares`` declines to offer a result (by raising
+        ``ValueError``).
+        """
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        sharedir = FilePath(self.anonymous_storage_server.sharedir).preauthChild(
+            # storage_index_to_dir likes to return multiple segments
+            # joined by pathsep
+            storage_index_to_dir(storage_index),
+        )
+        sharepath = sharedir.child(u"{}".format(sharenum))
+        sharepath.parent().makedirs()
+        whitebox_write_sparse_share(
+            sharepath,
+            version=version,
+            size=size,
+            # We know leases are at the end, where they'll get chopped off, so
+            # we don't bother to write any.
+            leases=[],
+            now=clock.seconds(),
+        )
+        with sharepath.open("wb") as fobj:
+            fobj.truncate(position)
+
+        self.assertThat(
+            self.client.stat_shares([storage_index]),
+            failed(
+                AfterPreprocessing(
+                    lambda f: f.value,
+                    IsInstance(ValueError),
+                ),
+            ),
+        )
+
+
+    @skipIf(platform.isWindows(), "Creating large files on Windows (no sparse files) is too slow")
+    @given(
+        storage_index=storage_indexes(),
+        sharenum=sharenums(),
+        size=sizes(min_value=2 ** 18, max_value=2 ** 40),
+        clock=clocks(),
+        leases=lists(lease_renew_secrets(), unique=True, min_size=1),
+    )
+    def test_stat_shares_immutable_large(self, storage_index, sharenum, size, clock, leases):
+        """
+        Size and lease information about very large immutable shares can be
+        retrieved from a storage server.
+
+        This is more of a whitebox test.  It assumes knowledge of Tahoe-LAFS
+        share placement and layout.  This is necessary to avoid having to
+        write real multi-gigabyte files to exercise the behavior.
+        """
+        def write_shares(storage_server, storage_index, sharenums, size, canary):
+            sharedir = FilePath(storage_server.sharedir).preauthChild(
+                # storage_index_to_dir likes to return multiple segments
+                # joined by pathsep
+                storage_index_to_dir(storage_index),
+            )
+            for sharenum in sharenums:
+                sharepath = sharedir.child(u"{}".format(sharenum))
+                sharepath.parent().makedirs()
+                whitebox_write_sparse_share(
+                    sharepath,
+                    version=1,
+                    size=size,
+                    leases=leases,
+                    now=clock.seconds(),
+                )
+
+        return self._stat_shares_immutable_test(
+            storage_index,
+            sharenum,
+            size,
+            clock,
+            leases,
+            write_shares,
+        )

     @skipIf(platform.isWindows(), "Storage server miscomputes slot size on Windows")
     @given(
@@ -454,8 +618,6 @@ class ShareTests(TestCase):
         stats = extract_result(
             self.client.stat_shares([storage_index]),
         )
-        # Hard-coded in Tahoe-LAFS
-        LEASE_INTERVAL = 60 * 60 * 24 * 31
         expected = [{
             sharenum: ShareStat(
                 size=get_implied_data_length(
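The max_value=11 bound on position in test_stat_shares_truncated_file
follows directly from the header layout: the three big-endian 32-bit fields
occupy 12 bytes, so offsets 0 through 11 are exactly the truncation points
that leave a partial header behind.  A one-line check of that arithmetic:

    from struct import calcsize

    assert calcsize(">LLL") == 12
    # Truncating at offset 12 or later leaves a complete, readable header, so
    # 11 is the largest position that still triggers the ValueError path.
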
diff --git a/tahoe-lafs-repo.nix b/tahoe-lafs-repo.nix
new file mode 100644
index 0000000000000000000000000000000000000000..fc944e5739e7246a922761b2c7abaf05b62b8327
--- /dev/null
+++ b/tahoe-lafs-repo.nix
@@ -0,0 +1,9 @@
+let
+  pkgs = import <nixpkgs> {};
+in
+  pkgs.fetchFromGitHub {
+    owner = "tahoe-lafs";
+    repo = "tahoe-lafs";
+    rev = "tahoe-lafs-1.14.0";
+    sha256 = "1ahdiapg57g6icv7p2wbzgkwl9lzdlgrsvbm5485414m7z2d6las";
+  }
diff --git a/tahoe-lafs.nix b/tahoe-lafs.nix
index c305267b8914012921a9896a2e42ef2560f42a38..212439638fb54e1dfb57d5ae91784759e234d02e 100644
--- a/tahoe-lafs.nix
+++ b/tahoe-lafs.nix
@@ -1,9 +1,20 @@
+{ python2Packages }:
 let
-  pkgs = import <nixpkgs> {};
+  # Manually assemble the tahoe-lafs build inputs because the tahoe-lafs 1.14.0
+  # eliot package runs the eliot test suite which is flaky.  Doing this gives
+  # us a place to insert a `doCheck = false` (at the cost of essentially
+  # duplicating tahoe-lafs' default.nix).  Not ideal but at least we can throw
+  # it away when we upgrade to the next tahoe-lafs version.
+  repo = ((import ./tahoe-lafs-repo.nix) + "/nix");
+  nevow-drv = repo + "/nevow.nix";
+  nevow = python2Packages.callPackage nevow-drv { };
+  eliot-drv = repo + "/eliot.nix";
+  eliot = (python2Packages.callPackage eliot-drv { }).overrideAttrs (old: {
+    doInstallCheck = false;
+  });
+  tahoe-lafs-drv = repo + "/tahoe-lafs.nix";
+  tahoe-lafs = python2Packages.callPackage tahoe-lafs-drv {
+    inherit nevow eliot;
+  };
 in
-  pkgs.fetchFromGitHub {
-    owner = "tahoe-lafs";
-    repo = "tahoe-lafs";
-    rev = "34aeefd3ddbf28dafbc3477e52461eafa53b545d";
-    sha256 = "0l8n4njbzgiwmn3qsmvzyzqlb0y9bj9g2jvpdynvsn1ggxrqmvsq";
-  }
\ No newline at end of file
+  tahoe-lafs
diff --git a/zkapauthorizer.nix b/zkapauthorizer.nix
index a5e611b4c8879f65aa12c63c968de225a96834ac..89639e0d41ec84db5811503dd150f9c33250d0a3 100644
--- a/zkapauthorizer.nix
+++ b/zkapauthorizer.nix
@@ -1,6 +1,6 @@
 { lib
 , buildPythonPackage, sphinx, git
-, attrs, zope_interface, eliot, aniso8601, twisted, tahoe-lafs, challenge-bypass-ristretto, treq
+, attrs, zope_interface, aniso8601, twisted, tahoe-lafs, challenge-bypass-ristretto, treq
 , fixtures, testtools, hypothesis, pyflakes, coverage
 , hypothesisProfile ? null
 , collectCoverage ? false
@@ -27,11 +27,18 @@ buildPythonPackage rec {
     sphinx
   ];

+  patches = [
+    # Remove the Tahoe-LAFS version pin in distutils config.  We have our own
+    # pinning and also our Tahoe-LAFS package has a bogus version number. :/
+    ./nix/setup.cfg.patch
+  ];
+
   propagatedBuildInputs = [
     attrs
     zope_interface
     aniso8601
-    eliot
+    # Inherit eliot from tahoe-lafs
+    # eliot
     twisted
     tahoe-lafs
     challenge-bypass-ristretto
@@ -49,7 +56,7 @@ buildPythonPackage rec {
     runHook preCheck
     "${pyflakes}/bin/pyflakes" src/_zkapauthorizer
     ZKAPAUTHORIZER_HYPOTHESIS_PROFILE=${hypothesisProfile'} python -m ${if collectCoverage
-      then "coverage run --branch --source _zkapauthorizer,twisted.plugins.zkapauthorizer --module"
+      then "coverage run --debug=config --module"
       else ""
     } twisted.trial ${extraTrialArgs} ${testSuite'}
     runHook postCheck
@@ -57,10 +64,8 @@ buildPythonPackage rec {

   postCheck = if collectCoverage
     then ''
-    python -m coverage html
     mkdir -p "$doc/share/doc/${name}"
-    cp -vr .coverage htmlcov "$doc/share/doc/${name}"
-    python -m coverage report
+    cp -v .coverage.* "$doc/share/doc/${name}"
     ''
     else "";
 }