diff --git a/nixos/modules/private-storage.nix b/nixos/modules/private-storage.nix
index 6892f3f48cd1ea5de275380b462f7daa2ca69ff3..f9521e44455bac9b751b7cd55ff748c1d3704e5a 100644
--- a/nixos/modules/private-storage.nix
+++ b/nixos/modules/private-storage.nix
@@ -67,6 +67,14 @@ in
       The port number on which to service storage clients.
     '';
   };
+  services.private-storage.publicReadOnlyStoragePort = lib.mkOption
+  { default = 8899;
+    type = lib.types.int;
+    example = 8099;
+    description = ''
+      The port number on which to service read-only storage clients.
+    '';
+  };
   services.private-storage.issuerRootURL = lib.mkOption
   { default = "https://issuer.${config.networking.domain}/";
     type = lib.types.str;
@@ -96,7 +104,35 @@ in
   # Define configuration based on values given for our options - starting with
   # the option that says whether this is even turned on.
   config = lib.mkIf cfg.enable
-  { services.tahoe.nodes."${storage-node-name}" =
+  {
+    # A read-only storage service. This allows read-only access for clients
+    # that use Great Black Swamp. There is no ZKAP/GBS integration yet, so
+    # this is the most we can do at the moment.
+    services.tahoe.nodes."ro-${storage-node-name}" =
+    { package = cfg.tahoe.package;
+      sections =
+      { client = if cfg.introducerFURL == null then {} else
+        { "introducer.furl" = cfg.introducerFURL;
+        };
+        node =
+        { nickname = "ro-${storage-node-name}";
+          "tub.port" = "tcp:${toString cfg.publicReadOnlyStoragePort}";
+          "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicReadOnlyStoragePort}";
+        };
+        storage =
+        { enabled = true;
+          storage_dir = "/storage";
+          readonly = true;
+          force_foolscap = false;
+        };
+      };
+    };
+    # The Tahoe NixOS module brings along a single socket for the web API.
+    # That's for the other storage node, though. Turn off the integration
+    # with this one.
+    systemd.services."tahoe.ro-storage".unitConfig.Requires = [];
+
+    services.tahoe.nodes."${storage-node-name}" =
     { package = cfg.tahoe.package;
       # Each attribute in this set corresponds to a section in the
       # tahoe.cfg file. Attributes on those sets correspond to individual
@@ -147,7 +183,7 @@ in
     };

     # Let traffic destined for the storage node's Foolscap server through.
-    networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort ];
+    networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort cfg.publicReadOnlyStoragePort ];

     systemd.tmpfiles.rules =
     # Add a rule to prevent incident reports from accumulating indefinitely.
diff --git a/nixos/tests/exercise-storage.py b/nixos/tests/exercise-storage.py
index 288a846d87e6fc468f22bc0e634ae4430c17791b..a4e177b5aa9db7372a41214d2ab4afeef1d23c13 100755
--- a/nixos/tests/exercise-storage.py
+++ b/nixos/tests/exercise-storage.py
@@ -47,7 +47,12 @@ def block_until_connected(api_root):
             in servers
             if server["connection_status"].startswith("Connected to ")
         )
-        if len(connected) >= 1:
+        # There is a read-only server and a read-write server! The easiest
+        # way to be sure we've connected to the read-write server is to wait
+        # until we're connected to both. Also, if we manage to connect to
+        # two servers, that gives us some confidence that both the read-only
+        # and read-write servers are running.
+        if len(connected) >= 2:
             print(
                 "Connected to a server:\n"
                 "\t{nodeid}\n"
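
For reference, here is roughly how the new option sits next to the existing ones in a deployment's configuration. This is a minimal sketch, not part of the patch: the hostname and the read-write port value are made-up illustration values, while the option names (enable, publicAddress, publicStoragePort, and the new publicReadOnlyStoragePort) all come from this module. 8899 is the default added by this change.

  services.private-storage =
  { enable = true;
    # Assumed hostname, for illustration only.
    publicAddress = "storage.example.invalid";
    # Existing read-write (Foolscap) storage service port; example value.
    publicStoragePort = 8898;
    # New read-only Great Black Swamp storage service port (module default).
    publicReadOnlyStoragePort = 8899;
  };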