diff --git a/src/_zkapauthorizer/_storage_client.py b/src/_zkapauthorizer/_storage_client.py
index 3e87eb87a0291618004346ba1830f7449c3f85a3..579312ea527aaa30a09304262aaf8fb55f63d1b0 100644
--- a/src/_zkapauthorizer/_storage_client.py
+++ b/src/_zkapauthorizer/_storage_client.py
@@ -176,6 +176,12 @@ class ZKAPAuthorizerStorageClient(object):
             )
         ))
 
+    def stat_shares(self, storage_indexes):
+        return self._rref.callRemote(
+            "stat_shares",
+            storage_indexes,
+        )
+
     def advise_corrupt_share(
         self,
         share_type,
diff --git a/src/_zkapauthorizer/_storage_server.py b/src/_zkapauthorizer/_storage_server.py
index a22163048add55113a9527b7e670aa3c70686b5c..c06dea97b62eff7bd897f2cc6fb01e6ed129e40d 100644
--- a/src/_zkapauthorizer/_storage_server.py
+++ b/src/_zkapauthorizer/_storage_server.py
@@ -81,6 +81,7 @@ from twisted.internet.interfaces import (
 )
 
 from .foolscap import (
+    ShareStat,
     RIPrivacyPassAuthorizedStorageServer,
 )
 from .storage_common import (
@@ -272,6 +273,13 @@ class ZKAPAuthorizerStorageServer(Referenceable):
             get_share_sizes(self._original, storage_index_or_slot, sharenums)
         )
 
+    def remote_stat_shares(self, storage_indexes_or_slots):
+        return list(
+            dict(stat_share(self._original, storage_index_or_slot))
+            for storage_index_or_slot
+            in storage_indexes_or_slots
+        )
+
     def remote_slot_testv_and_readv_and_writev(
         self,
         passes,
@@ -483,18 +491,12 @@ def get_share_sizes(storage_server, storage_index_or_slot, sharenums):
     share number and the second element is the data size for that share
     number.
     """
-    get_size = None
+    stat = None
     for sharenum, sharepath in get_all_share_paths(storage_server, storage_index_or_slot):
-        if get_size is None:
-            # Figure out if it is a storage index or a slot.
-            with open(sharepath) as share_file:
-                magic = share_file.read(32)
-            if magic == "Tahoe mutable container v1\n" + "\x75\x09\x44\x03\x8e":
-                get_size = get_slot_share_size
-            else:
-                get_size = get_storage_index_share_size
+        if stat is None:
+            stat = get_stat(sharepath)
         if sharenums is None or sharenum in sharenums:
-            yield sharenum, get_size(sharepath)
+            yield sharenum, stat(storage_server, storage_index_or_slot, sharepath).size
 
 
 def get_storage_index_share_size(sharepath):
@@ -511,6 +513,41 @@ def get_storage_index_share_size(sharepath):
     return share_data_length
 
 
+def get_lease_expiration(get_leases, storage_index_or_slot):
+    """
+    Get the lease expiration time for the shares in a bucket or slot, or None
+    if there is no lease on them.
+
+    :param get_leases: A one-argument callable which returns the leases.
+
+    :param storage_index_or_slot: Either a storage index or a slot identifying
+        the shares whose leases will be inspected.
+    """
+    for lease in get_leases(storage_index_or_slot):
+        return lease.get_expiration_time()
+    return None
+
+
+def stat_bucket(storage_server, storage_index, sharepath):
+    """
+    Get a ``ShareStat`` for one share in a bucket.
+    """
+    return ShareStat(
+        size=get_storage_index_share_size(sharepath),
+        lease_expiration=get_lease_expiration(storage_server.get_leases, storage_index),
+    )
+
+
+def stat_slot(storage_server, slot, sharepath):
+    """
+    Get a ``ShareStat`` for one share in a slot.
+    """
+    return ShareStat(
+        size=get_slot_share_size(sharepath),
+        lease_expiration=get_lease_expiration(storage_server.get_slot_leases, slot),
+    )
+
+
 def get_slot_share_size(sharepath):
     """
     Get the size of a share belonging to a slot (a mutable share).
@@ -525,6 +562,36 @@ def get_slot_share_size(sharepath):
     return share_data_length
 
 
+def stat_share(storage_server, storage_index_or_slot):
+    """
+    Get a ``ShareStat`` for each share in a bucket or a slot.
+
+    :return: An iterator of two-tuples of share number and corresponding
+        ``ShareStat``.
+    """
+    stat = None
+    for sharenum, sharepath in get_all_share_paths(storage_server, storage_index_or_slot):
+        if stat is None:
+            stat = get_stat(sharepath)
+        yield (sharenum, stat(storage_server, storage_index_or_slot, sharepath))
+
+
+def get_stat(sharepath):
+    """
+    Get a function that can retrieve the metadata from the share at the given
+    path.
+
+    This is necessary to differentiate between buckets and slots.
+    """
+    # Figure out if it is a storage index or a slot.
+    with open(sharepath) as share_file:
+        magic = share_file.read(32)
+    if magic == "Tahoe mutable container v1\n" + "\x75\x09\x44\x03\x8e":
+        return stat_slot
+    else:
+        return stat_bucket
+
+
 # I don't understand why this is required.
 # ZKAPAuthorizerStorageServer is-a Referenceable.  It seems like
 # the built in adapter should take care of this case.
diff --git a/src/_zkapauthorizer/foolscap.py b/src/_zkapauthorizer/foolscap.py
index 67b1b2f7c0b2d55c76f8a37cf7d4e6b8cf499be6..f0137ac2714a51bf15fd127b2c4265b4017e50cb 100644
--- a/src/_zkapauthorizer/foolscap.py
+++ b/src/_zkapauthorizer/foolscap.py
@@ -1,7 +1,28 @@
+# Copyright 2019 PrivateStorage.io, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Definitions related to the Foolscap-based protocol used by ZKAPAuthorizer
+to communicate between storage clients and servers.
+"""
+
 from __future__ import (
     absolute_import,
 )
 
+import attr
+
 from foolscap.constraint import (
     ByteStringConstraint,
 )
@@ -9,6 +30,7 @@ from foolscap.api import (
     Any,
     DictOf,
     ListOf,
+    Copyable,
 )
 from foolscap.remoteinterface import (
     RemoteMethodSchema,
@@ -21,6 +43,20 @@ from allmydata.interfaces import (
     Offset,
 )
 
+@attr.s
+class ShareStat(Copyable):
+    """
+    Represent some metadata about a share.
+
+    :ivar int size: The size, in bytes, of the share.
+
+    :ivar int lease_expiration: The POSIX timestamp of the time at which the
+        lease on this share expires, or None if there is no lease.
+    """
+    size = attr.ib()
+    lease_expiration = attr.ib()
+
+
 # The Foolscap convention seems to be to try to constrain inputs to valid
 # values.  So we'll try to limit the number of passes a client can supply.
 # Foolscap may be moving away from this so we may eventually drop this as
@@ -134,6 +170,15 @@ class RIPrivacyPassAuthorizedStorageServer(RemoteInterface):
         """
         return DictOf(int, Offset)
 
+    def stat_shares(
+            storage_indexes_or_slots=ListOf(StorageIndex),
+    ):
+        """
+        Get various metadata about shares in the given storage indexes or slots.
+        """
+        # Any() should be ShareStat but I don't know how to spell that.
+        return ListOf(DictOf(int, Any()))
+
     slot_readv = RIStorageServer["slot_readv"]
 
     slot_testv_and_readv_and_writev = add_passes(
diff --git a/src/_zkapauthorizer/tests/strategies.py b/src/_zkapauthorizer/tests/strategies.py
index 223a7ec3089d576139e322eb362dccba4f77d044..f4d1ce5029dd1975e220ebd9d078c8fb3a7320f5 100644
--- a/src/_zkapauthorizer/tests/strategies.py
+++ b/src/_zkapauthorizer/tests/strategies.py
@@ -19,6 +19,9 @@ Hypothesis strategies for property testing.
 from base64 import (
     urlsafe_b64encode,
 )
+from datetime import (
+    datetime,
+)
 
 import attr
 
@@ -39,6 +42,9 @@ from hypothesis.strategies import (
     datetimes,
 )
 
+from twisted.internet.task import (
+    Clock,
+)
 from twisted.web.test.requesthelper import (
     DummyRequest,
 )
@@ -541,3 +547,39 @@ def announcements():
     return just({
         u"ristretto-issuer-root-url": u"https://issuer.example.invalid/",
     })
+
+
+_POSIX_EPOCH = datetime.utcfromtimestamp(0)
+
+def posix_safe_datetimes():
+    """
+    Build datetime instances in a range that can be represented as floats
+    without losing microsecond precision.
+    """
+    return datetimes(
+        # I don't know that time-based parts of the system break down
+        # before the POSIX epoch but I don't know that they work, either.
+        # Don't time travel with this code.
+        min_value=_POSIX_EPOCH,
+        # Once we get far enough into the future we lose the ability to
+        # represent a timestamp with microsecond precision in a floating point
+        # number, which we do with any POSIX timestamp-like API (e.g.
+        # twisted.internet.task.Clock).  So don't go far enough into the
+        # future.  Furthermore, once we don't fit into an unsigned 4 byte
+        # integer, we can't round-trip through all the things that expect a
+        # time_t.  Stay back from the absolute top to give tests a little
+        # space to advance time, too.
+        max_value=datetime.utcfromtimestamp(2 ** 31),
+    )
+
+
+def clocks(now=posix_safe_datetimes()):
+    """
+    Build ``twisted.internet.task.Clock`` instances set to a time drawn from
+    ``now``.
+    """
+    def clock_at_time(when):
+        c = Clock()
+        c.advance((when - _POSIX_EPOCH).total_seconds())
+        return c
+    return now.map(clock_at_time)
diff --git a/src/_zkapauthorizer/tests/test_controller.py b/src/_zkapauthorizer/tests/test_controller.py
index 672c95fa682d25dbe8ed90e484a2f6596ecc7d89..5485b16bf2bcd0f0790ec7b2f123901c7fef88b2 100644
--- a/src/_zkapauthorizer/tests/test_controller.py
+++ b/src/_zkapauthorizer/tests/test_controller.py
@@ -64,9 +64,6 @@ from hypothesis.strategies import (
 from twisted.python.url import (
     URL,
 )
-from twisted.internet.task import (
-    Clock,
-)
 from twisted.internet.defer import (
     fail,
 )
@@ -119,6 +116,7 @@ from .strategies import (
     tahoe_configs,
     vouchers,
     voucher_objects,
+    clocks,
 )
 from .matchers import (
     Provides,
@@ -127,8 +125,6 @@ from .fixtures import (
     TemporaryVoucherStore,
 )
 
-POSIX_EPOCH = datetime.utcfromtimestamp(0)
-
 class PaymentControllerTests(TestCase):
     """
     Tests for ``PaymentController``.
@@ -232,32 +228,20 @@ class PaymentControllerTests(TestCase):
 
     @given(
         tahoe_configs(),
-        datetimes(
-            # I don't know that time-based parts of the system break down
-            # before the POSIX epoch but I don't know that they work, either.
-            # Don't time travel with this code.
-            min_value=POSIX_EPOCH,
-            # Once we get far enough into the future we lose the ability to
-            # represent a timestamp with microsecond precision in a floating
-            # point number, which we do with Clock.  So don't go far enough
-            # into the future.
-            max_value=datetime(2200, 1, 1),
-        ),
+        clocks(),
         vouchers(),
     )
-    def test_redeem_error_after_delay(self, get_config, now, voucher):
+    def test_redeem_error_after_delay(self, get_config, clock, voucher):
         """
         When ``PaymentController`` receives a non-terminal error trying to redeem
         a voucher, after some time passes it tries to redeem the voucher
         again.
         """
-        clock = Clock()
-        clock.advance((now - POSIX_EPOCH).total_seconds())
-
+        datetime_now = lambda: datetime.utcfromtimestamp(clock.seconds())
         store = self.useFixture(
             TemporaryVoucherStore(
                 get_config,
-                lambda: datetime.utcfromtimestamp(clock.seconds()),
+                datetime_now,
             ),
         ).store
         controller = PaymentController(
@@ -272,7 +256,7 @@ class PaymentControllerTests(TestCase):
             MatchesAll(
                 IsInstance(model_Unpaid),
                 MatchesStructure(
-                    finished=Equals(now),
+                    finished=Equals(datetime_now()),
                 ),
             )
         )
@@ -288,7 +272,7 @@ class PaymentControllerTests(TestCase):
                 IsInstance(model_Unpaid),
                 MatchesStructure(
                     # At the new time, demonstrating the retry was performed.
-                    finished=Equals(now + interval),
+                    finished=Equals(datetime_now()),
                 ),
             ),
         )
diff --git a/src/_zkapauthorizer/tests/test_storage_protocol.py b/src/_zkapauthorizer/tests/test_storage_protocol.py
index c045a03f37d7418c1adb1e7862c0b27e61853939..53a272a1219950b775ded88c7b78478b0b49fafd 100644
--- a/src/_zkapauthorizer/tests/test_storage_protocol.py
+++ b/src/_zkapauthorizer/tests/test_storage_protocol.py
@@ -80,6 +80,7 @@ from .strategies import (
     sharenum_sets,
     sizes,
     test_and_write_vectors_for_shares,
+    clocks,
     # Not really a strategy...
     bytes_for_share,
 )
@@ -99,10 +100,14 @@ from ..api import (
 )
 from ..storage_common import (
     slot_testv_and_readv_and_writev_message,
+    get_implied_data_length,
 )
 from ..model import (
     Pass,
 )
+from ..foolscap import (
+    ShareStat,
+)
 
 @attr.s
 class LocalRemote(object):
@@ -334,6 +339,125 @@ class ShareTests(TestCase):
             Equals(int(now + self.server.LEASE_PERIOD.total_seconds())),
         )
 
+    @given(
+        storage_index=storage_indexes(),
+        renew_secret=lease_renew_secrets(),
+        cancel_secret=lease_cancel_secrets(),
+        sharenum=sharenums(),
+        size=sizes(),
+        clock=clocks(),
+    )
+    def test_stat_shares_immutable(self, storage_index, renew_secret, cancel_secret, sharenum, size, clock):
+        """
+        Size and lease information about immutable shares can be retrieved from a
+        storage server.
+        """
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        # anonymous_storage_server uses time.time(), unfortunately.  And
+        # useFixture does not interact very well with Hypothesis.
+        patch = MonkeyPatch("time.time", clock.seconds)
+        try:
+            patch.setUp()
+            # Create a share we can toy with.
+            write_toy_shares(
+                self.anonymous_storage_server,
+                storage_index,
+                renew_secret,
+                cancel_secret,
+                {sharenum},
+                size,
+                canary=self.canary,
+            )
+        finally:
+            patch.cleanUp()
+
+        stats = extract_result(
+            self.client.stat_shares([storage_index]),
+        )
+        # Hard-coded in Tahoe-LAFS
+        LEASE_INTERVAL = 60 * 60 * 24 * 31
+        expected = [{
+            sharenum: ShareStat(
+                size=size,
+                lease_expiration=int(clock.seconds() + LEASE_INTERVAL),
+            ),
+        }]
+        self.assertThat(
+            stats,
+            Equals(expected),
+        )
+
+
+    @given(
+        storage_index=storage_indexes(),
+        secrets=tuples(
+            write_enabler_secrets(),
+            lease_renew_secrets(),
+            lease_cancel_secrets(),
+        ),
+        test_and_write_vectors_for_shares=test_and_write_vectors_for_shares(),
+        clock=clocks(),
+    )
+    def test_stat_shares_mutable(self, storage_index, secrets, test_and_write_vectors_for_shares, clock):
+        """
+        Size and lease information about mutable shares can be retrieved from a
+        storage server.
+        """
+        # Hypothesis causes our storage server to be used many times.  Clean
+        # up between iterations.
+        cleanup_storage_server(self.anonymous_storage_server)
+
+        # anonymous_storage_server uses time.time(), unfortunately.  And
+        # useFixture does not interact very well with Hypothesis.
+        patch = MonkeyPatch("time.time", clock.seconds)
+        try:
+            patch.setUp()
+            # Create a share we can toy with.
+            wrote, read = extract_result(
+                self.client.slot_testv_and_readv_and_writev(
+                    storage_index,
+                    secrets=secrets,
+                    tw_vectors={
+                        k: v.for_call()
+                        for (k, v)
+                        in test_and_write_vectors_for_shares.items()
+                    },
+                    r_vector=[],
+                ),
+            )
+        finally:
+            patch.cleanUp()
+        self.assertThat(
+            wrote,
+            Equals(True),
+            u"Server rejected a write to a new mutable slot",
+        )
+
+        stats = extract_result(
+            self.client.stat_shares([storage_index]),
+        )
+        # Hard-coded in Tahoe-LAFS
+        LEASE_INTERVAL = 60 * 60 * 24 * 31
+        expected = [{
+            sharenum: ShareStat(
+                size=get_implied_data_length(
+                    vectors.write_vector,
+                    vectors.new_length,
+                ),
+                lease_expiration=int(clock.seconds() + LEASE_INTERVAL),
+            )
+            for (sharenum, vectors)
+            in test_and_write_vectors_for_shares.items()
+        }]
+        self.assertThat(
+            stats,
+            Equals(expected),
+        )
+
+
     @given(
         storage_index=storage_indexes(),
         renew_secret=lease_renew_secrets(),
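
Reviewer note on the Any() placeholder in RIPrivacyPassAuthorizedStorageServer.stat_shares: as defined above, ShareStat subclasses Copyable but sets no typeToCopy and registers no corresponding RemoteCopy, so it can only travel across the test suite's in-process LocalRemote, not a real Foolscap connection. Below is a minimal sketch of one way the copyable machinery could be wired up; the "ShareStat" copytype string, the zero-valued defaults, and the explicit setCopyableState override are illustrative assumptions, not part of this patch.

# Sketch only (not part of the patch above): make ShareStat serializable over
# a real Foolscap connection by pairing Copyable with RemoteCopy.
import attr

from foolscap.copyable import (
    Copyable,
    RemoteCopy,
)


@attr.s
class ShareStat(Copyable, RemoteCopy):
    """
    Represent some metadata about a share.
    """
    # Foolscap sends this identifier along with the state returned by
    # Copyable.getStateToCopy (the instance __dict__ by default); defining
    # ``copytype`` on a RemoteCopy subclass registers it as the receiver for
    # that identifier.
    typeToCopy = copytype = "ShareStat"

    # RemoteCopy instantiates the class with no arguments before calling
    # setCopyableState, so every attribute needs a default.
    size = attr.ib(default=0)
    lease_expiration = attr.ib(default=0)

    def setCopyableState(self, state):
        # Restore the copied state wholesale on the receiving side.
        self.__dict__ = state

With a copier registered along these lines, the Any() in the return schema could probably be tightened (for example to ListOf(DictOf(int, ShareStat))), though whether foolscap accepts a RemoteCopy subclass directly as a constraint should be verified against foolscap's schema handling before relying on it.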