diff --git a/nix/sources.json b/nix/sources.json
index cebb37b71e05a1aa6ccb7f1e46ed69ec2aaabfd7..9ce1d8e0cf1793a5db2e50d82e8758ec645578e7 100644
--- a/nix/sources.json
+++ b/nix/sources.json
@@ -3,12 +3,12 @@
         "branch": "master",
         "description": "Create highly reproducible python environments",
         "homepage": "",
-        "owner": "DavHau",
+        "owner": "PrivateStorageio",
         "repo": "mach-nix",
-        "rev": "dc94135e31d5c90c40a00a6cbdf9330526e8153b",
-        "sha256": "08l7v0hn9cs8irda0kd55c6lmph3an2i7p47wh2d48hza9pipckr",
+        "rev": "4d2cc6c6fe5b2875e7b48a84a1b1f688c6991c42",
+        "sha256": "03xabrwzbby6dcp3w4li7p9cxsca5n2jlz452sz7r4h1n5sx9mwg",
         "type": "tarball",
-        "url": "https://github.com/DavHau/mach-nix/archive/dc94135e31d5c90c40a00a6cbdf9330526e8153b.tar.gz",
+        "url": "https://github.com/PrivateStorageio/mach-nix/archive/4d2cc6c6fe5b2875e7b48a84a1b1f688c6991c42.tar.gz",
         "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
     },
     "niv": {
@@ -47,10 +47,10 @@
         "homepage": "https://tahoe-lafs.org/",
         "owner": "tahoe-lafs",
         "repo": "tahoe-lafs",
-        "rev": "0a072a98c80f0a9ae1913674b54696ebc992406a",
-        "sha256": "13mbvg371xd2zncy8l9zxl9xv5fjxfddgp8vlv854z260ss4535r",
+        "rev": "d3c6f58a8ded7db3324ef97c47f5c1921c3d58b7",
+        "sha256": "18zr6l53r32pigymsnv10m67kgf981bxl8c3rjhv5bikfnf986q8",
         "type": "tarball",
-        "url": "https://github.com/tahoe-lafs/tahoe-lafs/archive/0a072a98c80f0a9ae1913674b54696ebc992406a.tar.gz",
+        "url": "https://github.com/tahoe-lafs/tahoe-lafs/archive/d3c6f58a8ded7db3324ef97c47f5c1921c3d58b7.tar.gz",
         "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
     }
 }
diff --git a/setup.cfg b/setup.cfg
index 0c4f044719917527c1935362b1b2b41492e8b4c3..ebbd1ac295db4e0950f28815949e5900618e59b1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -40,7 +40,12 @@ install_requires =
     # incompatible with Tahoe-LAFS'. So duplicate them here (the ones that
     # have been observed to cause problems).
     Twisted[tls,conch]>=18.4.0
-    tahoe-lafs >=1.14, <1.17, !=1.15.*
+
+    # Lease renewal changes aren't available from a release of Tahoe yet.
+    # Note "Public index servers SHOULD NOT allow the use of direct references
+    # in uploaded distributions."
+    # https://www.python.org/dev/peps/pep-0440/#direct-references
+    tahoe-lafs @ https://github.com/tahoe-lafs/tahoe-lafs/archive/d3c6f58a8ded7db3324ef97c47f5c1921c3d58b7.zip
     treq
     pyutil
diff --git a/src/_zkapauthorizer/_storage_server.py b/src/_zkapauthorizer/_storage_server.py
index 09934d352a1552365e2c8a3deb20333ee4f5debd..e379989c36f4ae1054da88791e8604a9c1fc92f6 100644
--- a/src/_zkapauthorizer/_storage_server.py
+++ b/src/_zkapauthorizer/_storage_server.py
@@ -205,7 +205,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
 
         # Note: The *allocate_buckets* protocol allows for some shares to
        # already exist on the server. When this is the case, the cost of the
-        # operation is based only on the buckets which are really allocated
+        # operation is based only on the shares which are really allocated
         # here. It's not clear if we can allow the client to supply the
         # reduced number of passes in the call but we can be sure to only mark
         # as spent enough passes to cover the allocated buckets. The return
@@ -230,14 +230,23 @@ class ZKAPAuthorizerStorageServer(Referenceable):
             allocated_size,
         )
 
-        return self._original.remote_allocate_buckets(
+        alreadygot, bucketwriters = self._original._allocate_buckets(
             storage_index,
             renew_secret,
             cancel_secret,
             sharenums,
             allocated_size,
-            canary,
+            renew_leases=False,
         )
+        # Copy/paste the disconnection handling logic from
+        # StorageServer.remote_allocate_buckets.
+        for bw in bucketwriters.values():
+            disconnect_marker = canary.notifyOnDisconnect(bw.disconnected)
+            self._original._bucket_writer_disconnect_markers[bw] = (
+                canary,
+                disconnect_marker,
+            )
+        return alreadygot, bucketwriters
 
     def remote_get_buckets(self, storage_index):
         """
@@ -366,9 +375,7 @@ class ZKAPAuthorizerStorageServer(Referenceable):
 
         # Skip over the remotely exposed method and jump to the underlying
         # implementation which accepts one additional parameter that we know
-        # about (and don't expose over the network): renew_leases. We always
-        # pass False for this because we want to manage leases completely
-        # separately from writes.
+        # about (and don't expose over the network): renew_leases.
         return self._original.slot_testv_and_readv_and_writev(
             storage_index,
             secrets,
diff --git a/src/_zkapauthorizer/tests/test_storage_protocol.py b/src/_zkapauthorizer/tests/test_storage_protocol.py
index a77dc29266a905226ea7a38c2d9bd13f7fc96072..32e7e406380eaff0ce140f22b37db0cf394ae0a3 100644
--- a/src/_zkapauthorizer/tests/test_storage_protocol.py
+++ b/src/_zkapauthorizer/tests/test_storage_protocol.py
@@ -19,6 +19,7 @@ Tests for communication between the client and server components.
 from __future__ import absolute_import
 
 from allmydata.storage.common import storage_index_to_dir
+from allmydata.storage.shares import get_share_file
 from challenge_bypass_ristretto import random_signing_key
 from fixtures import MonkeyPatch
 from foolscap.referenceable import LocalReferenceable
@@ -357,6 +358,8 @@ class ShareTests(TestCase):
         cancel_secret=lease_cancel_secrets(),
         existing_sharenums=sharenum_sets(),
         additional_sharenums=sharenum_sets(),
+        when=posix_timestamps(),
+        interval=integers(min_value=1, max_value=60 * 60 * 24 * 31),
         size=sizes(),
     )
     def test_shares_already_exist(
@@ -366,6 +369,8 @@ class ShareTests(TestCase):
         cancel_secret,
         existing_sharenums,
         additional_sharenums,
+        when,
+        interval,
         size,
     ):
         """
@@ -374,14 +379,22 @@ class ShareTests(TestCase):
         """
         # A helper that only varies on sharenums.
         def allocate_buckets(sharenums):
-            return self.client.allocate_buckets(
-                storage_index,
-                renew_secret,
-                cancel_secret,
-                sharenums,
-                size,
-                canary=self.canary,
+            alreadygot, writers = extract_result(
+                self.client.allocate_buckets(
+                    storage_index,
+                    renew_secret,
+                    cancel_secret,
+                    sharenums,
+                    size,
+                    canary=self.canary,
+                ),
             )
+            for sharenum, writer in writers.items():
+                writer.remote_write(0, bytes_for_share(sharenum, size))
+                writer.remote_close()
+
+        # Set some arbitrary time so we can inspect lease renewal behavior.
+        self.clock.advance(when)
 
         # Create some shares to alter the behavior of the next
         # allocate_buckets.
@@ -395,14 +408,15 @@ class ShareTests(TestCase):
             canary=self.canary,
         )
 
+        # Let some time pass so leases added after this point will look
+        # different from leases added before this point.
+        self.clock.advance(interval)
+
         # Do a partial repeat of the operation. Shuffle around
         # the shares in some random-ish way. If there is partial overlap
         # there should be partial spending.
         all_sharenums = existing_sharenums | additional_sharenums
-        self.assertThat(
-            allocate_buckets(all_sharenums),
-            succeeded(Always()),
-        )
+        allocate_buckets(all_sharenums)
 
         # This is what the client should try to spend. This should also match
         # the total number of passes issued during the test.
@@ -435,6 +449,38 @@ class ShareTests(TestCase):
             ),
         )
 
+        def get_lease_grant_times(storage_server, storage_index):
+            """
+            Get the grant times for all of the leases for all of the shares at the
+            given storage index.
+            """
+            shares = storage_server._get_bucket_shares(storage_index)
+            for sharenum, sharepath in shares:
+                sharefile = get_share_file(sharepath)
+                leases = sharefile.get_leases()
+                grant_times = list(
+                    lease.get_grant_renew_time_time() for lease in leases
+                )
+                yield sharenum, grant_times
+
+        expected_leases = {}
+        # Chop off the non-integer part of the expected values because share
+        # files only keep integer precision.
+        expected_leases.update(
+            {sharenum: [int(when)] for sharenum in existing_sharenums}
+        )
+        expected_leases.update(
+            {
+                sharenum: [int(when + interval)]
+                for sharenum in all_sharenums - existing_sharenums
+            }
+        )
+
+        self.assertThat(
+            dict(get_lease_grant_times(self.anonymous_storage_server, storage_index)),
+            Equals(expected_leases),
+        )
+
     @given(
         storage_index=storage_indexes(),
         renew_secrets=tuples(lease_renew_secrets(), lease_renew_secrets()),
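The _storage_server.py change above skips Tahoe's Foolscap-facing remote_allocate_buckets and calls the internal _allocate_buckets with renew_leases=False, so allocating buckets no longer renews leases on shares that already exist at the storage index; lease management happens separately. Because the remote method is bypassed, the wrapper also has to reproduce its canary bookkeeping, otherwise in-progress BucketWriters would not be cleaned up when a client disconnects mid-upload. The following is a minimal sketch of that shape, assuming the internal _allocate_buckets signature and the _bucket_writer_disconnect_markers attribute of the pinned Tahoe revision shown in the diff; the function name is hypothetical and this is an illustration, not the actual ZKAPAuthorizer method.

    def allocate_without_implicit_lease_renewal(
        server, storage_index, renew_secret, cancel_secret, sharenums, size, canary
    ):
        # The internal allocation path takes renew_leases; passing False leaves
        # leases on already-existing shares untouched, unlike the remote method,
        # which would renew them as a side effect of the allocation.
        alreadygot, bucketwriters = server._allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums,
            size,
            renew_leases=False,
        )
        # Re-register the disconnect cleanup the remote method would normally
        # set up: if the Foolscap connection drops, each in-progress
        # BucketWriter is notified so the partial upload can be abandoned.
        for bw in bucketwriters.values():
            marker = canary.notifyOnDisconnect(bw.disconnected)
            server._bucket_writer_disconnect_markers[bw] = (canary, marker)
        return alreadygot, bucketwriters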
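The updated test_shares_already_exist exercises this with a fake clock: it advances to "when", creates the pre-existing shares, advances by "interval", and only then performs the allocation under test. Since allocation no longer renews leases, the pre-existing shares are expected to keep a lease granted at int(when) while the newly allocated shares get a lease granted at int(when + interval); the int() matches the integer precision of grant times stored in share files. A small self-contained sketch of the timing arithmetic, assuming the test's self.clock is a twisted.internet.task.Clock:

    from twisted.internet.task import Clock

    clock = Clock()                    # fake time starts at 0
    when, interval = 1000.5, 86400     # example values for the Hypothesis-drawn parameters

    clock.advance(when)                # pre-existing shares are written here
    first_grant = int(clock.seconds())

    clock.advance(interval)            # the allocation under test happens here
    second_grant = int(clock.seconds())

    assert first_grant == int(when)               # leases on pre-existing shares
    assert second_grant == int(when + interval)   # leases on newly allocated shares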