diff --git a/nixos/modules/private-storage.nix b/nixos/modules/private-storage.nix
index cada491e04a49ce1e4931b58ffae6527f8cf77c5..3b979bfb92fbe9ec3f3a5057682ef91070611dee 100644
--- a/nixos/modules/private-storage.nix
+++ b/nixos/modules/private-storage.nix
@@ -21,14 +21,6 @@ let
   max-incident-age = "29d";
 in
 {
-  # Upstream tahoe-lafs module conflicts with ours (since ours is a
-  # copy/paste/edit of upstream's...).  Disable it.
-  #
-  # https://nixos.org/nixos/manual/#sec-replace-modules
-  disabledModules =
-  [ "services/network-filesystems/tahoe.nix"
-  ];
-
   imports = [
     # Give it a good SSH configuration.
     ./ssh.nix
diff --git a/nixos/modules/tahoe.nix b/nixos/modules/tahoe.nix
index 8ea358863c8939d84857f6259b5f4370a401d908..cb0abf0813e3646a6054e4746726f7d507de484c 100644
--- a/nixos/modules/tahoe.nix
+++ b/nixos/modules/tahoe.nix
@@ -9,6 +9,14 @@ let
   ini = pkgs.callPackage ../lib/ini.nix { };
 in
   {
+    # Upstream tahoe-lafs module conflicts with ours (since ours is a
+    # copy/paste/edit of upstream's...).  Disable it.
+    #
+    # https://nixos.org/nixos/manual/#sec-replace-modules
+    disabledModules =
+    [ "services/network-filesystems/tahoe.nix"
+    ];
+
     options.services.tahoe = {
       introducers = mkOption {
         default = {};
@@ -233,22 +241,36 @@ in
               created = "${nodedir}.created";
               atomic = "${nodedir}.atomic";
             in ''
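+              # Fail fast: if anything below errors out, the "created" marker
+              # must not be written, so the setup is retried on the next start.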
+              set -eo pipefail
               if [ ! -e ${created} ]; then
                 mkdir -p /var/db/tahoe-lafs/
 
                 # Get rid of any prior partial efforts.  It might not exist.
                 # Don't let this tank us.
-                rm -rv ${atomic} && [ ! -e ${atomic} ]
+                rm -rv ${atomic} || [ ! -e ${atomic} ]
 
                 # Really create the node.
                 tahoe create-node --hostname=localhost ${atomic}
 
-                # Move it to the real location.  We don't create it in-place
-                # because we might fail partway through and leave inconsistent
-                # state.  Also, systemd probably created logs/incidents/ already and
-                # `create-node` complains if it finds these exist already.
-                rm -rv ${nodedir} && [ ! -e ${nodedir} ]
+                # Move aside any existing node directory so that its contents
+                # (keys, configuration) are preserved rather than destroyed.
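+                # Backups get numeric suffixes (.1, .2, ...); at most 100 are
+                # created.  tests/tahoe.nix asserts this layout.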
+                if [ -e ${nodedir} ]; then
+                  for backup in $(seq 1 100); do
+                    if [ ! -e ${nodedir}.$backup ]; then
+                      mv ${nodedir} ${nodedir}.$backup
+                      break
+                    fi
+                  done
+                fi
+
+                # Move the newly created node into its real location.  We
+                # don't create it in place because a failure partway through
+                # would leave inconsistent state behind.  Also, systemd has
+                # probably created logs/incidents/ already and `create-node`
+                # complains if they already exist.
                 mv ${atomic} ${nodedir}
+
+                # Record our complete, consistent success.
                 touch ${created}
               fi
 
diff --git a/nixos/modules/tests/exercise-storage.py b/nixos/modules/tests/exercise-storage.py
index 88cd34bb73102ddbeb6178bfe13e8ed202a8be6e..e3a1d4d2ec7674042487cc0c6dabc670fcd6561d 100755
--- a/nixos/modules/tests/exercise-storage.py
+++ b/nixos/modules/tests/exercise-storage.py
@@ -10,6 +10,8 @@ from sys import argv
 from os import urandom
 from subprocess import check_output
 from io import BytesIO
+from time import sleep, ctime
+from pprint import pprint
 
 import requests
 import hyperlink
@@ -21,10 +23,45 @@ def main():
 
     api_root = get_api_root(clientDir)
 
+    block_until_connected(api_root)
+
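+    # Without the wait above, the exercises below could race the client's
+    # first connection to its storage server and fail spuriously.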
     subject_cap = exercise_immutable(api_root, someData)
     newDir = exercise_mkdir(api_root)
     exercise_link_unlink(api_root, newDir, subject_cap)
 
+def block_until_connected(api_root):
+    """
+    Block until the Tahoe-LAFS node at the given API root reports it has
+    connected to at least one storage server.
+    """
+    while True:
+        response = requests.get(
+            api_root.replace(query={u"t": u"json"}),
+        )
+        response.raise_for_status()
+        welcome = response.json()
+        servers = welcome["servers"]
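+        # The welcome JSON lists every known storage server; a
+        # connection_status beginning with "Connected to " marks an
+        # established connection.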
+        connected = list(
+            server
+            for server
+            in servers
+            if server["connection_status"].startswith("Connected to ")
+        )
+        if len(connected) >= 1:
+            print(
+                "Connected to a server:\n"
+                "\t{nodeid}\n"
+                "\t{status}\n"
+                "\t{last_received_data}\n".format(
+                    nodeid=connected[0]["nodeid"],
+                    status=connected[0]["connection_status"],
+                    last_received_data=ctime(connected[0]["last_received_data"]),
+                ),
+            )
+            return
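+        # Not connected yet; dump the welcome data to the log and try again.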
+        pprint(welcome)
+        sleep(0.1)
+
 def exercise_immutable(api_root, someData):
     cap = tahoe_put(api_root, someData)
     dataReadBack = tahoe_get(api_root, cap)
diff --git a/nixos/modules/tests/private-storage.nix b/nixos/modules/tests/private-storage.nix
index 47acfbf475a5d029b5f2800d4a11e2ff18eaa9d2..b51f85cfa984e60e14e692e43697b75835986774 100644
--- a/nixos/modules/tests/private-storage.nix
+++ b/nixos/modules/tests/private-storage.nix
@@ -1,5 +1,5 @@
+{ pkgs }:
 let
-  pkgs = import <nixpkgs> { };
   pspkgs = import ../pspkgs.nix { inherit pkgs; };
 
   sshPrivateKey = ./probeuser_ed25519;
@@ -89,10 +89,9 @@ let
       # succeed() is not success but 1 is.
       1;
       ";
-in
-# https://nixos.org/nixos/manual/index.html#sec-nixos-tests
-import <nixpkgs/nixos/tests/make-test.nix> {
-
+in {
+  # https://nixos.org/nixos/manual/index.html#sec-nixos-tests
+  # https://nixos.mayflower.consulting/blog/2019/07/11/leveraging-nixos-tests-in-your-project/
   nodes = rec {
     # Get a machine where we can run a Tahoe-LAFS client node.
     client =
@@ -202,7 +201,6 @@ import <nixpkgs/nixos/tests/make-test.nix> {
       eval {
       ${runOnNode "introducer" [ run-introducer "/tmp/node.pem" (toString introducerPort) introducerFURL ]}
       } or do {
-        my $error = $@ || 'Unknown failure';
         my ($code, $log) = $introducer->execute('cat /tmp/stdout /tmp/stderr');
         $introducer->log($log);
         die $@;
@@ -246,7 +244,6 @@ import <nixpkgs/nixos/tests/make-test.nix> {
       eval {
         ${runOnNode "client" [ get-passes "http://127.0.0.1:3456" issuerURL voucher ]}
       } or do {
-        my $error = $@ || 'Unknown failure';
         my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
         $client->log($log);
 
@@ -261,10 +258,34 @@ import <nixpkgs/nixos/tests/make-test.nix> {
       eval {
         ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
       } or do {
-        my $error = $@ || 'Unknown failure';
         my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
         $client->log($log);
         die $@;
       };
-      '';
-}
+
+      # It should be possible to restart the storage service without the
+      # storage node fURL changing.
+      eval {
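+        # The storage plugin's announcement fURL is persisted under the
+        # node's private directory; clients expect it to survive restarts.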
+        my $furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl';
+        my ($before_code, $before) = $storage->execute('cat ' . $furlfile);
+        ${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
+        my ($after_code, $after) = $storage->execute('cat ' . $furlfile);
+        if ($before ne $after) {
+          die 'fURL changed after storage node restart';
+        }
+        1;
+      } or do {
+        my ($code, $log) = $storage->execute('cat /tmp/stdout /tmp/stderr');
+        $storage->log($log);
+        die $@;
+      };
+
+      # The client should still be able to use the storage node after the
+      # restart.
+      eval {
+        ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
+      } or do {
+        my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
+        $client->log($log);
+        die $@;
+      };
+      '';
+}
diff --git a/nixos/modules/tests/tahoe.nix b/nixos/modules/tests/tahoe.nix
new file mode 100644
index 0000000000000000000000000000000000000000..be3d51a0c4fbfd4cac73b4a990c4507dcb6b674e
--- /dev/null
+++ b/nixos/modules/tests/tahoe.nix
@@ -0,0 +1,72 @@
+{ ... }: {
+  nodes = {
+    storage = { config, pkgs, ... }: {
+      imports = [
+        ../tahoe.nix
+      ];
+
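+      # A minimal storage-only node with fixed web and tub ports so the test
+      # script can wait on them deterministically.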
+      services.tahoe.nodes.storage = {
+        package = (pkgs.callPackage ../pspkgs.nix { }).privatestorage;
+        sections = {
+          node = {
+            nickname = "storage";
+            "web.port" = "tcp:4000:interface=127.0.0.1";
+            "tub.port" = "tcp:4001";
+            "tub.location" = "tcp:127.0.0.1:4001";
+          };
+          storage = {
+            enabled = true;
+          };
+        };
+      };
+    };
+  };
+  testScript = ''
+  startAll;
+
+  # After the service starts, destroy the "created" marker to force it to
+  # re-create its internal state.
+  $storage->waitForOpenPort(4001);
+  $storage->succeed("systemctl stop tahoe.storage");
+  $storage->succeed("rm /var/db/tahoe-lafs/storage.created");
+  $storage->succeed("systemctl start tahoe.storage");
+
+  # After it starts up again, verify it has consistent internal state and a
+  # backup of the prior state.
+  $storage->waitForOpenPort(4001);
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.created ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1 ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]");
+  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]");
+
+  # Stop it again, once again destroy the "created" marker, and this time also
+  # jam some partial state in the way that will need cleanup.
+  $storage->succeed("systemctl stop tahoe.storage");
+  $storage->succeed("rm /var/db/tahoe-lafs/storage.created");
+  $storage->succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial");
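+  # Wrap the start in eval so a failure dumps the tahoe.storage journal
+  # before aborting the test.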
+  eval {
+    $storage->succeed("systemctl start tahoe.storage");
+    1;
+  } or do {
+    my ($x, $y) = $storage->execute("journalctl -u tahoe.storage");
+    $storage->log($y);
+    die $@;
+  };
+
+  # After it starts up again, verify it has consistent internal state and
+  # backups of the prior two states.  It also has no copy of the inconsistent
+  # state because it could never have been used.
+  $storage->waitForOpenPort(4001);
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.created ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1 ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2 ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]");
+  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]");
+  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]");
+  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]");
+  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]");
+  '';
+}
diff --git a/nixos/system-tests.nix b/nixos/system-tests.nix
index 5cc4088c49a27ab7745e7e9f2a4dc1ad8b01ec93..b2556d4692ee0c3eff96554fa7c13df59ec94507 100644
--- a/nixos/system-tests.nix
+++ b/nixos/system-tests.nix
@@ -1,5 +1,7 @@
 # The overall system test suite for PrivateStorageio NixOS configuration.
-
-# There is only one system test so far so I don't have to do anything to
-# aggregate multiple tests...
-import ./modules/tests/private-storage.nix
+let
+  pkgs = import <nixpkgs> { };
+in {
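+  # Each attribute builds independently, e.g.:
+  #   nix-build nixos/system-tests.nix -A tahoe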
+  private-storage = pkgs.nixosTest ./modules/tests/private-storage.nix;
+  tahoe = pkgs.nixosTest ./modules/tests/tahoe.nix;
+}