{ pkgs }:
let
sshPrivateKey = ./probeuser_ed25519;
sshPublicKey = ./probeuser_ed25519.pub;
sshUsers = {
  root = (builtins.readFile sshPublicKey);
  probeuser = (builtins.readFile sshPublicKey);
};
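# Note both accounts are authorized with the same test-only key, so the SSH
# probes in the test script below can log in as either user.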
# Generate a command which can be used with runOnNode to ssh to the given
# host.
ssh = username: hostname: [
  "cp" sshPrivateKey "/tmp/ssh_key" ";"
  "chmod" "0400" "/tmp/ssh_key" ";"
  "ssh" "-oStrictHostKeyChecking=no" "-i" "/tmp/ssh_key" "${username}@${hostname}" ":"
];
# Separate helper programs so we can write as little python inside a string
# inside a nix expression as possible.
run-introducer = ./run-introducer.py;
run-client = ./run-client.py;
get-passes = ./get-passes.py;
exercise-storage = ./exercise-storage.py;
# This is a test double of the Stripe API server. It is extremely simple.
# It barely knows how to respond to exactly the API endpoints we use,
# exactly how we use them.
stripe-api-double = ./stripe-api-double.py;
# The root URL of the Ristretto-flavored PrivacyPass issuer API.
issuerURL = "http://issuer/";
# A voucher to try to redeem for ZKAPs in the test below.
voucher = "xyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxy";
# The issuer's signing key. Notionally, this is a secret key. This is only
# the value for this system test though so I don't care if it leaks to the
# world at large.
ristrettoSigningKeyPath =
  let
    key = "wumQAfSsJlQKDDSaFN/PZ3EbgBit8roVgfzllfCK2gQ=";
    basename = "signing-key.private";
  in
    pkgs.writeText basename key;
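# Note pkgs.writeText drops the key into the world-readable Nix store; that
# is fine here only because, as noted above, this key is disposable.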
stripeSecretKeyPath =
  let
    # Ugh.
    key = "sk_test_blubblub";
    basename = "stripe.secret";
  in
    pkgs.writeText basename key;
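# (The "sk_test_" prefix marks this as a Stripe test-mode key, so it is
# similarly safe to publish.)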
# Here are the preconstructed secrets which we can assign to the introducer.
# This is a lot easier than having the introducer generate them and then
# discovering and configuring the other nodes with them.
pemFile = ./node.pem;
tubID = "rr7y46ixsg6qmck4jkkc7hke6xe4sv5f";
swissnum = "2k6p3wrabat5jrj7otcih4cjdema4q3m";
introducerPort = 35151;
location = "tcp:introducer:${toString introducerPort}";
introducerFURL = "pb://${tubID}@${location}/${swissnum}";
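# With the values above this evaluates to:
#   pb://rr7y46ixsg6qmck4jkkc7hke6xe4sv5f@tcp:introducer:35151/2k6p3wrabat5jrj7otcih4cjdema4q3m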
introducerFURLFile = pkgs.writeTextFile {
  name = "introducer.furl";
  text = introducerFURL;
};
networkConfig = {
  # Just need to disable the firewall so all the traffic flows freely. We
  # could do other network configuration here too, if we wanted. Initially
  # I thought we might need to statically assign IPs but we can just use
  # the node names, "introducer", etc, instead.
  networking.firewall.enable = false;
  networking.dhcpcd.enable = false;
};
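# networkConfig is merged into the api_stripe_com node definition below
# using the // (attribute set update) operator.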
# Return a python program fragment to run a shell command on one of the nodes.
# The first argument is the name of the node. The second is a list of the
# argv to run.
#
# The program's output is piped to systemd-cat and the python fragment
# evaluates to success if the command exits with a success status.
runOnNode = node: argv:
  let
    command = builtins.concatStringsSep " " argv;
  in
    "${node}.succeed('set -eo pipefail; ${command} | systemd-cat')";
in {
# https://nixos.org/nixos/manual/index.html#sec-nixos-tests
# https://nixos.mayflower.consulting/blog/2019/07/11/leveraging-nixos-tests-in-your-project/
nodes = rec {
  # Get a machine where we can run a Tahoe-LAFS client node.
  client =
    { config, pkgs, ourpkgs, ... }:
    { imports = [ ../modules/packages.nix ];
      environment.systemPackages = [
        pkgs.daemonize
        # A Tahoe-LAFS configuration capable of using the right storage
        # plugin.
        ourpkgs.privatestorage
        # Support for the tests we'll run.
        (pkgs.python3.withPackages (ps: [ ps.requests ps.hyperlink ]))
      ];
    };
  # Get another machine where we can run a Tahoe-LAFS introducer node. It
  # has the same configuration as the client.
  introducer = client;
  # Configure a single machine as a PrivateStorage storage node.
  storage =
    { config, pkgs, ... }:
    { imports =
      [ ../modules/packages.nix
        ../modules/private-storage.nix
        ../modules/ssh.nix
      ];
      services.private-storage = {
        enable = true;
        introducerFURL = introducerFURL;
        issuerRootURL = issuerURL;
        inherit ristrettoSigningKeyPath;
        inherit sshUsers;
      };
    };
  # Operate an issuer as well.
  issuer =
    { config, pkgs, ... }:
    { imports =
      [ ../modules/packages.nix
        ../modules/issuer.nix
        ../modules/ssh.nix
      ];
      services.private-storage.sshUsers = sshUsers;
      services.private-storage-issuer = {
        enable = true;
        domains = ["issuer"];
        letsEncryptAdminEmail = "user@example.invalid";
        allowedChargeOrigins = [ "http://unused.invalid" ];
        inherit stripeSecretKeyPath;
        stripeEndpointDomain = "api_stripe_com";
        stripeEndpointScheme = "HTTP";
        stripeEndpointPort = 80;
      };
    };
  # Also run a fake Stripe API endpoint server. Nodes in these tests run on
  # a network without outside access so we can't easily use the real Stripe
  # API endpoint and with this one we have greater control over the
  # behavior, anyway, without all of the unintentional transient network
  # errors that come from the public internet. These tests *aren't* meant
  # to prove PaymentServer correctly interacts with the real Stripe API
  # server so this is an unverified fake. The PaymentServer test suite
  # needs to take care of any actual Stripe API integration testing.
  "api_stripe_com" =
    { config, pkgs, ... }:
    let python = pkgs.python3.withPackages (ps: [ ps.twisted ]);
    in networkConfig // {
      environment.systemPackages = [
        python
        pkgs.curl
      ];
      systemd.services."api.stripe.com" = {
        enable = true;
        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" ];
        script = "${python}/bin/python ${stripe-api-double} tcp:80";
      };
    };
};
# Test the machines with a Python program.
testScript = ''
# Boot the VMs. We used to do them all in parallel but the boot
# sequence got flaky at some point for some reason I don't
# understand. :/ It might be related to this:
#
# https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
#
# See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
# that constructs the QEMU command that gets run.
#
# Boot them one at a time for now.
issuer.connect()
introducer.connect()
storage.connect()
client.connect()
api_stripe_com.connect()
# The issuer and the storage server should accept SSH connections. This
# doesn't prove it is so but if it fails it's a pretty good indication
# it isn't so.
storage.wait_for_open_port(22)
${runOnNode "issuer" (ssh "probeuser" "storage")}
${runOnNode "issuer" (ssh "root" "storage")}
issuer.wait_for_open_port(22)
${runOnNode "storage" (ssh "probeuser" "issuer")}
${runOnNode "storage" (ssh "root" "issuer")}
# Set up a Tahoe-LAFS introducer.
introducer.copy_from_host('${pemFile}', '/tmp/node.pem')
try:
    ${runOnNode "introducer" [ run-introducer "/tmp/node.pem" (toString introducerPort) introducerFURL ]}
except:
    code, output = introducer.execute('cat /tmp/stdout /tmp/stderr')
    introducer.log(output)
    raise
#
# Get a Tahoe-LAFS storage server up.
#
code, version = storage.execute('tahoe --version')
storage.log(version)
# The systemd unit should reach the running state.
storage.wait_for_unit('tahoe.storage.service')
# Some while after that the Tahoe-LAFS node should listen on the web API
# port. The port number here has to agree with the port number set in
# the private-storage.nix module.
storage.wait_for_open_port(3456)
# Once the web API is listening it should be possible to scrape some
# status from the node if it is really working.
storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
# It should have Eliot logging turned on as well.
storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
#
# Storage appears to be working so try to get a client to speak with it.
#
${runOnNode "client" [ run-client "/tmp/client" introducerFURL issuerURL ]}
client.wait_for_open_port(3456)
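# (The client node's web API uses the same port number as the storage
# node's; they are separate VMs so there is no conflict.)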
# Make sure the fake Stripe API server is ready for requests.
try:
    api_stripe_com.wait_for_unit("api.stripe.com")
except:
    code, output = api_stripe_com.execute('journalctl -u api.stripe.com')
    api_stripe_com.log(output)
    raise
# Get some ZKAPs from the issuer.
try:
    ${runOnNode "client" [
      get-passes
      "http://127.0.0.1:3456"
      "/tmp/client/private/api_auth_token"
      issuerURL
      voucher
    ]}
except:
    code, output = client.execute('cat /tmp/stdout /tmp/stderr')
    client.log(output)
    # Dump the fake Stripe API server logs, too, since the error may arise
    # from a PaymentServer/Stripe interaction.
    code, output = api_stripe_com.execute('journalctl -u api.stripe.com')
    api_stripe_com.log(output)
    raise
# The client should be prepped now. Make it try to use some storage.
try:
    ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
except:
    code, output = client.execute('cat /tmp/stdout /tmp/stderr')
    client.log(output)
    raise
# It should be possible to restart the storage service without the
# storage node fURL changing.
try:
    furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl'
    before = storage.execute('cat ' + furlfile)
    ${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
    after = storage.execute('cat ' + furlfile)
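    # Note: the test driver's execute() returns an (exit code, output) pair,
    # so this comparison covers both the cat exit status and the fURL bytes.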
    if (before != after):
        raise Exception('fURL changes after storage node restart')
except:
    code, output = storage.execute('cat /tmp/stdout /tmp/stderr')
    storage.log(output)
    raise
# The client should actually still work, too.
try:
    ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
except:
    code, output = client.execute('cat /tmp/stdout /tmp/stderr')
    client.log(output)
    raise
# The issuer metrics should be accessible from the monitoring network.
issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')
# The issuer metrics should NOT be accessible from any other network.
issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
issuer.execute('ifconfig lo:fauxvpn down')
'';
}