diff --git a/nixos/modules/tests/private-storage.nix b/nixos/modules/tests/private-storage.nix
index 353abc891fafd1cc988e47a1befa530a012470dc..59c572fabc6730d2b8351b4bcd37987adab2f88b 100644
--- a/nixos/modules/tests/private-storage.nix
+++ b/nixos/modules/tests/private-storage.nix
@@ -82,11 +82,10 @@ let
     let
       command = builtins.concatStringsSep " " argv;
     in
-      "
-      \$${node}->succeed('set -eo pipefail; ${command} | systemd-cat');
-      # succeed() is not success but 1 is.
-      1;
-      ";
+      "${node}.succeed('set -eo pipefail; ${command} | systemd-cat')";
+
+  pspkgs = import ../../../nixpkgs-ps.nix { };
+
 in {
   # https://nixos.org/nixos/manual/index.html#sec-nixos-tests
   # https://nixos.mayflower.consulting/blog/2019/07/11/leveraging-nixos-tests-in-your-project/
@@ -98,7 +97,7 @@ in {
           pkgs.daemonize
           # A Tahoe-LAFS configuration capable of using the right storage
           # plugin.
-          pkgs.privatestorage
+          pspkgs.privatestorage
           # Support for the tests we'll run.
           (pkgs.python3.withPackages (ps: [ ps.requests ps.hyperlink ]))
         ];
@@ -174,138 +173,126 @@ in {
       };
   };
 
-  # Test the machines with a Perl program (sobbing).
-  testScript =
-    ''
-      # Boot the VMs.  We used to do them all in parallel but the boot
-      # sequence got flaky at some point for some reason I don't
-      # understand. :/ It might be related to this:
-      #
-      # https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
-      #
-      # See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
-      # that constructs the QEMU command that gets run.
-      #
-      # Boot them one at a time for now.
-      $issuer->connect();
-      $introducer->connect();
-      $storage->connect();
-      $client->connect();
-      $api_stripe_com->connect();
-
-      # The issuer and the storage server should accept SSH connections.  This
-      # doesn't prove it is so but if it fails it's a pretty good indication
-      # it isn't so.
-      $storage->waitForOpenPort(22);
-      ${runOnNode "issuer" (ssh "probeuser" "storage")}
-      ${runOnNode "issuer" (ssh "root" "storage")}
-      $issuer->waitForOpenPort(22);
-      ${runOnNode "storage" (ssh "probeuser" "issuer")}
-      ${runOnNode "storage" (ssh "root" "issuer")}
-
-      # Set up a Tahoe-LAFS introducer.
-      $introducer->copyFileFromHost(
-          '${pemFile}',
-          '/tmp/node.pem'
-      );
-
-      eval {
+  # Test the machines with a Python program.
+  testScript = ''
+    # Boot the VMs.  We used to do them all in parallel but the boot
+    # sequence got flaky at some point for some reason I don't
+    # understand. :/ It might be related to this:
+    #
+    # https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
+    #
+    # See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
+    # that constructs the QEMU command that gets run.
+    #
+    # Boot them one at a time for now.
+    issuer.connect()
+    introducer.connect()
+    storage.connect()
+    client.connect()
+    api_stripe_com.connect()
+
+    # The issuer and the storage server should accept SSH connections.  This
+    # doesn't prove it is so but if it fails it's a pretty good indication
+    # it isn't so.
+    storage.wait_for_open_port(22)
+    ${runOnNode "issuer" (ssh "probeuser" "storage")}
+    ${runOnNode "issuer" (ssh "root" "storage")}
+    issuer.wait_for_open_port(22)
+    ${runOnNode "storage" (ssh "probeuser" "issuer")}
+    ${runOnNode "storage" (ssh "root" "issuer")}
+
+    # Set up a Tahoe-LAFS introducer.
+    introducer.copy_from_host('${pemFile}', '/tmp/node.pem')
+
+    try:
       ${runOnNode "introducer" [ run-introducer "/tmp/node.pem" (toString introducerPort) introducerFURL ]}
-      } or do {
-        my ($code, $log) = $introducer->execute('cat /tmp/stdout /tmp/stderr');
-        $introducer->log($log);
-        die $@;
-      };
-
-      #
-      # Get a Tahoe-LAFS storage server up.
-      #
-      my ($code, $version) = $storage->execute('tahoe --version');
-      $storage->log($version);
-
-      # The systemd unit should reach the running state.
-      $storage->waitForUnit('tahoe.storage.service');
-
-      # Some while after that the Tahoe-LAFS node should listen on the web API
-      # port. The port number here has to agree with the port number set in
-      # the private-storage.nix module.
-      $storage->waitForOpenPort(3456);
-
-      # Once the web API is listening it should be possible to scrape some
-      # status from the node if it is really working.
-      $storage->succeed('tahoe -d /var/db/tahoe-lafs/storage status');
-
-      # It should have Eliot logging turned on as well.
-      $storage->succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]');
-
-      #
-      # Storage appears to be working so try to get a client to speak with it.
-      #
-      ${runOnNode "client" [ run-client "/tmp/client" introducerFURL issuerURL ]}
-      $client->waitForOpenPort(3456);
-
-      # Make sure the fake Stripe API server is ready for requests.
-      eval {
-        $api_stripe_com->waitForUnit("api.stripe.com");
-        1;
-      } or do {
-        my ($code, $log) = $api_stripe_com->execute('journalctl -u api.stripe.com');
-        $api_stripe_com->log($log);
-        die $@;
-      };
-
-      # Get some ZKAPs from the issuer.
-      eval {
-        ${runOnNode "client" [
-          get-passes
-          "http://127.0.0.1:3456"
-          "/tmp/client/private/api_auth_token"
-          issuerURL
-          voucher
-        ]}
-      } or do {
-        my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
-        $client->log($log);
-
-        # Dump the fake Stripe API server logs, too, since the error may arise
-        # from a PaymentServer/Stripe interaction.
-        my ($code, $log) = $api_stripe_com->execute('journalctl -u api.stripe.com');
-        $api_stripe_com->log($log);
-        die $@;
-      };
-
-      # The client should be prepped now.  Make it try to use some storage.
-      eval {
-        ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
-      } or do {
-        my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
-        $client->log($log);
-        die $@;
-      };
-
-      # It should be possible to restart the storage service without the
-      # storage node fURL changing.
-      eval {
-        my $furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl';
-        my $before = $storage->execute('cat ' . $furlfile);
-        ${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
-        my $after = $storage->execute('cat ' . $furlfile);
-        if ($before != $after) {
-          die 'fURL changes after storage node restart';
-        }
-        1;
-      } or do {
-        my ($code, $log) = $storage->execute('cat /tmp/stdout /tmp/stderr');
-        $storage->log($log);
-        die $@;
-      };
-
-      # The client should actually still work, too.
-      eval {
-        ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
-      } or do {
-        my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
-        $client->log($log);
-        die $@;
-      };
-      ''; }
+    except:
+      code, log = introducer.execute('cat /tmp/stdout /tmp/stderr')
+      introducer.log(log)
+      raise
+
+    #
+    # Get a Tahoe-LAFS storage server up.
+    #
+    code, version = storage.execute('tahoe --version')
+    storage.log(version)
+
+    # The systemd unit should reach the running state.
+    storage.wait_for_unit('tahoe.storage.service')
+
+    # Some while after that the Tahoe-LAFS node should listen on the web API
+    # port. The port number here has to agree with the port number set in
+    # the private-storage.nix module.
+    storage.wait_for_open_port(3456)
+
+    # Once the web API is listening it should be possible to scrape some
+    # status from the node if it is really working.
+    storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
+
+    # It should have Eliot logging turned on as well.
+    storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
+
+    #
+    # Storage appears to be working so try to get a client to speak with it.
+    #
+    ${runOnNode "client" [ run-client "/tmp/client" introducerFURL issuerURL ]}
+    client.wait_for_open_port(3456)
+
+    # Make sure the fake Stripe API server is ready for requests.
+    try:
+      api_stripe_com.wait_for_unit("api.stripe.com")
+    except:
+      code, log = api_stripe_com.execute('journalctl -u api.stripe.com')
+      api_stripe_com.log(log)
+      raise
+
+    # Get some ZKAPs from the issuer.
+    try:
+      ${runOnNode "client" [
+        get-passes
+        "http://127.0.0.1:3456"
+        "/tmp/client/private/api_auth_token"
+        issuerURL
+        voucher
+      ]}
+    except:
+      code, log = client.execute('cat /tmp/stdout /tmp/stderr')
+      client.log(log)
+
+      # Dump the fake Stripe API server logs, too, since the error may arise
+      # from a PaymentServer/Stripe interaction.
+      code, log = api_stripe_com.execute('journalctl -u api.stripe.com')
+      api_stripe_com.log(log)
+      raise
+
+    # The client should be prepped now.  Make it try to use some storage.
+    try:
+      ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
+    except:
+      code, log = client.execute('cat /tmp/stdout /tmp/stderr')
+      client.log(log)
+      raise
+
+    # It should be possible to restart the storage service without the
+    # storage node fURL changing.
+    try:
+      furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl'
+      before = storage.execute('cat ' + furlfile)
+      ${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
+      after = storage.execute('cat ' + furlfile)
+      if before != after:
+        raise Exception('fURL changes after storage node restart')
+    except:
+      code, log = storage.execute('cat /tmp/stdout /tmp/stderr')
+      storage.log(log)
+      raise
+
+    # The client should actually still work, too.
+    try:
+      ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
+    except:
+      code, log = client.execute('cat /tmp/stdout /tmp/stderr')
+      client.log(log)
+      raise
+  '';
+}
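
A note on the error-handling translation in the test script above: every Perl
eval { ... } or do { ... } block becomes the same Python shape, namely run a
step and, on failure, dump the node's captured output before re-raising. A
minimal sketch of that shape factored into a helper is shown below. It is not
part of the patch; run_step and its parameters are illustrative names only.

    # Illustrative sketch, not part of the patch: the recurring
    # "run a step, dump diagnostics on failure, then re-raise" pattern.
    def run_step(node, step, diagnostics="cat /tmp/stdout /tmp/stderr"):
        try:
            step()
        except Exception:
            # execute() returns (exit_code, output); only the output is logged.
            _, log = node.execute(diagnostics)
            node.log(log)
            raise

    # Hypothetical use, mirroring one of the exercise-storage steps:
    # run_step(client, lambda: client.succeed("<command generated by runOnNode>"))
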
diff --git a/nixos/modules/tests/tahoe.nix b/nixos/modules/tests/tahoe.nix
index df7acdf3cde3e8101a1119dbce127b17a68ef589..a582accfc09c404383d796c28b6072de70a02ce7 100644
--- a/nixos/modules/tests/tahoe.nix
+++ b/nixos/modules/tests/tahoe.nix
@@ -1,4 +1,7 @@
-{ ... }: {
+{ ... }:
+  let
+    pspkgs = import ../../../nixpkgs-ps.nix { };
+  in {
   nodes = {
     storage = { config, pkgs, ... }: {
       imports = [
@@ -6,7 +9,7 @@
       ];
 
       services.tahoe.nodes.storage = {
-        package = pkgs.privatestorage;
+        package = pspkgs.privatestorage;
         sections = {
           node = {
             nickname = "storage";
@@ -22,51 +25,49 @@
     };
   };
   testScript = ''
-  startAll;
+  start_all()
 
   # After the service starts, destroy the "created" marker to force it to
   # re-create its internal state.
-  $storage->waitForOpenPort(4001);
-  $storage->succeed("systemctl stop tahoe.storage");
-  $storage->succeed("rm /var/db/tahoe-lafs/storage.created");
-  $storage->succeed("systemctl start tahoe.storage");
+  storage.wait_for_open_port(4001)
+  storage.succeed("systemctl stop tahoe.storage")
+  storage.succeed("rm /var/db/tahoe-lafs/storage.created")
+  storage.succeed("systemctl start tahoe.storage")
 
   # After it starts up again, verify it has consistent internal state and a
   # backup of the prior state.
-  $storage->waitForOpenPort(4001);
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.created ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1 ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]");
-  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]");
+  storage.wait_for_open_port(4001)
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]")
+  storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]")
 
   # Stop it again, once again destroy the "created" marker, and this time also
   # jam some partial state in the way that will need cleanup.
-  $storage->succeed("systemctl stop tahoe.storage");
-  $storage->succeed("rm /var/db/tahoe-lafs/storage.created");
-  $storage->succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial");
-  eval {
-    $storage->succeed("systemctl start tahoe.storage");
-    1;
-  } or do {
-    my ($x, $y) = $storage->execute("journalctl -u tahoe.storage");
-    $storage->log($y);
-    die $@;
-  };
+  storage.succeed("systemctl stop tahoe.storage")
+  storage.succeed("rm /var/db/tahoe-lafs/storage.created")
+  storage.succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial")
+  try:
+    storage.succeed("systemctl start tahoe.storage")
+  except:
+    x, y = storage.execute("journalctl -u tahoe.storage")
+    storage.log(y)
+    raise
 
   # After it starts up again, verify it has consistent internal state and
   # backups of the prior two states.  It also has no copy of the inconsistent
   # state because it could never have been used.
-  $storage->waitForOpenPort(4001);
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.created ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.1 ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2 ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]");
-  $storage->succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]");
-  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]");
-  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]");
-  $storage->succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]");
+  storage.wait_for_open_port(4001)
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.2 ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]")
+  storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]")
+  storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]")
+  storage.succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]")
+  storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]")
   '';
 }
diff --git a/nixos/system-tests.nix b/nixos/system-tests.nix
index 5f51d01dd57267b75b3742c76c03c1393676d426..ff37fc85a854f1780e65956da5a71825833f06f4 100644
--- a/nixos/system-tests.nix
+++ b/nixos/system-tests.nix
@@ -1,6 +1,6 @@
 # The overall system test suite for PrivateStorageio NixOS configuration.
 let
-  pkgs = import ../nixpkgs-ps.nix { };
+  pkgs = import ../nixpkgs-2105.nix { };
 in {
   private-storage = pkgs.nixosTest ./modules/tests/private-storage.nix;
   tahoe = pkgs.nixosTest ./modules/tests/tahoe.nix;
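
For reference, the Perl-to-Python test driver API mapping applied across both
test scripts (every entry below appears in the hunks above; this summary is
not itself part of the patch):

    # Perl driver call                           Python driver call
    # startAll;                                  start_all()
    # $node->connect();                          node.connect()
    # $node->succeed(cmd);                       node.succeed(cmd)
    # my ($code, $out) = $node->execute(cmd);    code, out = node.execute(cmd)
    # $node->waitForOpenPort(port);              node.wait_for_open_port(port)
    # $node->waitForUnit(unit);                  node.wait_for_unit(unit)
    # $node->copyFileFromHost(src, dst);         node.copy_from_host(src, dst)
    # $node->log(msg);                           node.log(msg)
    # eval { ... } or do { ...; die $@; };       try: ... except: ...; raise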