diff --git a/nixos/modules/private-storage.nix b/nixos/modules/private-storage.nix
index 1b55614859801fd7580dd50508a10382bb493ae3..f9521e44455bac9b751b7cd55ff748c1d3704e5a 100644
--- a/nixos/modules/private-storage.nix
+++ b/nixos/modules/private-storage.nix
@@ -67,6 +67,14 @@ in
         The port number on which to service storage clients.
       '';
     };
+    services.private-storage.publicReadOnlyStoragePort = lib.mkOption
+    { default = 8899;
+      type = lib.types.int;
+      example = 8099;
+      description = ''
+        The port number on which to service read-only storage clients.
+      '';
+    };
     services.private-storage.issuerRootURL = lib.mkOption
     { default = "https://issuer.${config.networking.domain}/";
       type = lib.types.str;
@@ -96,68 +104,96 @@ in
   # Define configuration based on values given for our options - starting with
   # the option that says whether this is even turned on.
   config = lib.mkIf cfg.enable
-  { services.tahoe.nodes."${storage-node-name}" =
-    { package = cfg.tahoe.package;
-      # Each attribute in this set corresponds to a section in the tahoe.cfg
-      # file.  Attributes on those sets correspond to individual assignments
-      # in those sections.
-      #
-      # We just populate this according to policy/preference of Private
-      # Storage.
-      sections =
-      { client = if cfg.introducerFURL == null then {} else
-        { "introducer.furl" = cfg.introducerFURL;
+    {
+      # A read-only storage service.  This allows read-only access for clients
+      # that use Great Black Swamp.  There is no ZKAP/GBS integration yet, so
+      # this is the most we can do at the moment.
+      services.tahoe.nodes."ro-${storage-node-name}" =
+        { package = cfg.tahoe.package;
+          sections =
+            { client = if cfg.introducerFURL == null then {} else
+                { "introducer.furl" = cfg.introducerFURL;
+                };
+              node =
+                { nickname = "ro-${storage-node-name}";
+                  "tub.port" = "tcp:${toString cfg.publicReadOnlyStoragePort}";
+                  "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicReadOnlyStoragePort}";
+                };
+              storage =
+                { enabled = true;
+                  storage_dir = "/storage";
+                  readonly = true;
+                  force_foolscap = false;
+                };
+            };
         };
-        node =
-        # XXX Should try to name that is unique across the grid.
-        { nickname = "${storage-node-name}";
+      # The Tahoe NixOS module brings along a single socket for the web API.
+      # That socket is for the other storage node, though, so turn off the
+      # integration for this one.
+      systemd.services."tahoe.ro-storage".unitConfig.Requires = [];
 
-          # We have the web port active because the CLI uses it and because it
-          # exposes a metrics endpoint for our monitoring system.  The actual
-          # port configuration lives in systemd so that it can order binding
-          # the socket correctly with other dependencies (which we can't
-          # reliably do with Tahoe without a bunch of other work).
-          "web.port" = "systemd:domain=INET:index=0";
+      services.tahoe.nodes."${storage-node-name}" =
+        { package = cfg.tahoe.package;
+          # Each attribute in this set corresponds to a section in the
+          # tahoe.cfg file.  Attributes on those sets correspond to individual
+          # assignments in those sections.
+          #
+          # We just populate this according to policy/preference of Private
+          # Storage.
+          sections =
+            { client = if cfg.introducerFURL == null then {} else
+                { "introducer.furl" = cfg.introducerFURL;
+                };
+              node =
+                # XXX Should try to use a name that is unique across the grid.
+                { nickname = "${storage-node-name}";
 
-          # We have to tell Tahoe-LAFS where to listen for Foolscap
-          # connections for the storage protocol.  We have to tell it twice.
-          # First, in the syntax which it uses to listen.
-          "tub.port" = "tcp:${toString cfg.publicStoragePort}";
+                  # We have the web port active because the CLI uses it and
+                  # because it exposes a metrics endpoint for our monitoring
+                  # system.  The actual port configuration lives in systemd so
+                  # that it can order binding the socket correctly with other
+                  # dependencies (which we can't reliably do with Tahoe
+                  # without a bunch of other work).
+                  "web.port" = "systemd:domain=INET:index=0";
 
-          # Second, in the syntax it advertises to in the fURL.
-          "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicStoragePort}";
-        };
-        storage =
-        { enabled = true;
-          # Put the storage where we have a lot of space configured.
-          storage_dir = "/storage";
-          # Turn on our plugin.
-          plugins = "privatestorageio-zkapauthz-v2";
-        };
-        "storageserver.plugins.privatestorageio-zkapauthz-v2" =
-        { "ristretto-issuer-root-url" = cfg.issuerRootURL;
-          "ristretto-signing-key-path" = cfg.ristrettoSigningKeyPath;
-        } // (
-          if cfg.passValue == null
-          then {}
-          else { "pass-value" = (toString cfg.passValue); }
-        );
-      };
-    };
+                  # We have to tell Tahoe-LAFS where to listen for Foolscap
+                  # connections for the storage protocol.  We have to tell it twice.
+                  # First, in the syntax which it uses to listen.
+                  "tub.port" = "tcp:${toString cfg.publicStoragePort}";
 
-    # Let traffic destined for the storage node's Foolscap server through.
-    networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort ];
+                  # Second, in the syntax it advertises to in the fURL.
+                  "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicStoragePort}";
+                };
+              storage =
+                { enabled = true;
+                  # Put the storage where we have a lot of space configured.
+                  storage_dir = "/storage";
+                  # Turn on our plugin.
+                  plugins = "privatestorageio-zkapauthz-v2";
+                };
+              "storageserver.plugins.privatestorageio-zkapauthz-v2" =
+                { "ristretto-issuer-root-url" = cfg.issuerRootURL;
+                  "ristretto-signing-key-path" = cfg.ristrettoSigningKeyPath;
+                } // (
+                  if cfg.passValue == null
+                  then {}
+                  else { "pass-value" = (toString cfg.passValue); }
+                );
+            };
+        };
 
-    systemd.tmpfiles.rules =
-    # Add a rule to prevent incident reports from accumulating indefinitely.
-    # See tmpfiles.d(5) for the syntax.
-    [ "d ${incidents-dir} 0755 root root ${max-incident-age} -"
-    ];
+      # Let traffic destined for the storage node's Foolscap server through.
+      networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort cfg.publicReadOnlyStoragePort ];
 
-    environment.systemPackages = [
-      # Provide a useful tool for reporting about shares.
-      ourpkgs.leasereport
-    ];
+      systemd.tmpfiles.rules =
+        # Add a rule to prevent incident reports from accumulating indefinitely.
+        # See tmpfiles.d(5) for the syntax.
+        [ "d ${incidents-dir} 0755 root root ${max-incident-age} -"
+        ];
 
-  };
+      environment.systemPackages = [
+        # Provide a useful tool for reporting about shares.
+        ourpkgs.leasereport
+      ];
+    };
 }
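
For orientation, a deployment importing this module could enable the new
read-only service roughly as sketched below.  Only publicReadOnlyStoragePort
(and its 8899 default) comes from the option added above; enable and
publicAddress are pre-existing options referenced by the module, and the
address shown is just a placeholder.

    # Hypothetical NixOS configuration exercising the new option.
    {
      services.private-storage = {
        enable = true;
        publicAddress = "storage.example.invalid";  # placeholder
        publicReadOnlyStoragePort = 8899;           # new option; 8899 is the default
      };
    }
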
diff --git a/nixos/modules/tahoe.nix b/nixos/modules/tahoe.nix
index 51c8695420fc6e0b1b9bf2cc92fe0e6e0128ca6d..b53435080d104bccfed5e7e3004f7891f14159bf 100644
--- a/nixos/modules/tahoe.nix
+++ b/nixos/modules/tahoe.nix
@@ -221,10 +221,12 @@ in
               # arguments to $(tahoe run). The node directory must come first,
               # and arguments which alter Twisted's behavior come afterwards.
               ExecStart = ''
-                ${settings.package}/bin/tahoe --eliot-destination ${eliotLog} run ${nodedir} -n -l- --pidfile=${pidfile}
+                ${settings.package}/bin/tahoe --eliot-destination ${eliotLog} run --allow-stdin-close ${nodedir} -n -l- --pidfile=${pidfile}
               '';
+
               # Twisted wants non-blocking sockets:
               NonBlocking = true;
+
               # The rlimit on number of open files controls how many
               # connections a particular storage server can accept (factoring
               # in the number of non-connection files the server needs open -
diff --git a/nixos/pkgs/privatestorage/default.nix b/nixos/pkgs/privatestorage/default.nix
index e152f021cf5d6409d7b0f05582d28a44ccb87641..f2c7ddea1a2eb35259ca5dbbff3ccd86bdac3a04 100644
--- a/nixos/pkgs/privatestorage/default.nix
+++ b/nixos/pkgs/privatestorage/default.nix
@@ -4,7 +4,7 @@ let
   repo = fetchFromGitHub (builtins.removeAttrs repo-data [ "branch" ]);
   zk = import repo;
   # XXX package version choice here
-  zkapauthorizer = zk.outputs.packages.x86_64-linux.zkapauthorizer-python39-tahoe_1_17_1;
+  zkapauthorizer = zk.outputs.packages.x86_64-linux.zkapauthorizer-python39-tahoe_dev;
   python = zkapauthorizer.passthru.python;
 in
   python.withPackages (ps: [ zkapauthorizer ] )
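
The repo-data binding consumed by fetchFromGitHub above is not visible in this
hunk; presumably it is loaded from the repo.json pin updated below, along the
lines of:

    # Assumed shape of the pin loading (not part of the hunk above).
    repo-data = builtins.fromJSON (builtins.readFile ./repo.json);
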
diff --git a/nixos/pkgs/privatestorage/repo.json b/nixos/pkgs/privatestorage/repo.json
index f57181a6978b0c72bcf6cc9c99618f5afda3a927..a0aab4861c244c1b6663453c15a3e6a31f99ba0d 100644
--- a/nixos/pkgs/privatestorage/repo.json
+++ b/nixos/pkgs/privatestorage/repo.json
@@ -1,8 +1,8 @@
 {
   "owner": "PrivateStorageio",
-  "branch": "458.update-tahoe-lafs",
+  "branch": "main",
   "repo": "ZKAPAuthorizer",
-  "rev": "6f8d67c81cdc6de2f52e0a699a077b43232a0589",
+  "rev": "fb89e91a6c7f595cd0b1c7aa7055cbd32c482180",
   "outputHashAlgo": "sha512",
-  "outputHash": "3bg882wlm0bn23xazal81mzac63svg66gcbrabvzqyin98jrwlimk5n64hrdiywiw954g7srpdr1g9f1y4p79vbpnkfkrv7sa108aa4"
+  "outputHash": "3f44znykq8f7mcgdwdyhgf2dvnx7yydmlrjcr17mxfwya4jqmx8zb59mxkxvar0ahn639y2nq3bcqxdyipljfxilfi1cz21li908kkw"
 }
\ No newline at end of file
diff --git a/nixos/tests/exercise-storage.py b/nixos/tests/exercise-storage.py
index 288a846d87e6fc468f22bc0e634ae4430c17791b..a4e177b5aa9db7372a41214d2ab4afeef1d23c13 100755
--- a/nixos/tests/exercise-storage.py
+++ b/nixos/tests/exercise-storage.py
@@ -47,7 +47,12 @@ def block_until_connected(api_root):
             in servers
             if server["connection_status"].startswith("Connected to ")
         )
-        if len(connected) >= 1:
+        # There is a read-only server and a read-write server!  The easiest
+        # way to be sure we've connected to the read-write server is to wait
+        # until we're connected to both.  Connecting to two servers also
+        # gives us some confidence that both the read-only and the read-write
+        # server are actually running.
+        if len(connected) >= 2:
             print(
                 "Connected to a server:\n"
                 "\t{nodeid}\n"
diff --git a/nixos/tests/run-client.py b/nixos/tests/run-client.py
index 86909bde894225865c9f295f4ba3c461519141a4..403e47977ca675358dcdab6d0296ba006903b78c 100755
--- a/nixos/tests/run-client.py
+++ b/nixos/tests/run-client.py
@@ -43,7 +43,7 @@ def main():
         "daemonize",
         "-o", "/tmp/stdout",
         "-e", "/tmp/stderr",
-        which("tahoe"), "run", "/tmp/client",
+        which("tahoe"), "run", "--allow-stdin-close", "/tmp/client",
     ])
 
 def run(argv):
diff --git a/nixos/tests/run-introducer.py b/nixos/tests/run-introducer.py
index 33c3ec10369477e39c1461b3e59149e015f03ce9..9062c43243f3a5a672ae41d53ace636d7698843a 100755
--- a/nixos/tests/run-introducer.py
+++ b/nixos/tests/run-introducer.py
@@ -31,11 +31,11 @@ def main():
         "daemonize",
         "-o", "/tmp/stdout",
         "-e", "/tmp/stderr",
-        which("tahoe"), "run", "/tmp/introducer",
+        which("tahoe"), "run", "--allow-stdin-close", "/tmp/introducer",
     ])
 
     retry(
-        "waiting for open introducer port",
+        f"connect to introducer (port {introducerPort})",
         lambda: checkOpen(int(introducerPort)),
     )