Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 480 additions and 273 deletions
{
"owner": "PrivateStorageio",
"branch": "main",
"repo": "ZKAPAuthorizer",
"rev": "fb89e91a6c7f595cd0b1c7aa7055cbd32c482180",
"outputHashAlgo": "sha512",
"outputHash": "3f44znykq8f7mcgdwdyhgf2dvnx7yydmlrjcr17mxfwya4jqmx8zb59mxkxvar0ahn639y2nq3bcqxdyipljfxilfi1cz21li908kkw"
}
\ No newline at end of file
 {
   "owner": "privatestorage",
   "repo": "zkap-spending-service",
-  "rev": "e0d63b79213d16f2de6629167ea8f1236ba22e14",
+  "rev": "66fd395268b466d4c7bb0a740fb758a5acccd1c4",
   "branch": "main",
-  "domain": "whetstone.privatestorage.io",
+  "domain": "whetstone.private.storage",
-  "outputHash": "30abb0g9xxn4lp493kj5wmz8kj5q2iqvw40m8llqvb3zamx60gd8cy451ii7z15qbrbx9xmjdfw0k4gviij46fkx1s8nbich5c8qx57",
+  "outputHash": "1nryvsccncrka25kzrwqkif4x68ib0cs2vbw1ngfmzw86gjgqx01a7acgspmrpfs62p4q8zw0f2ynl8jr3ygyypjrl8v7w8g49y0y0y",
   "outputHashAlgo": "sha512"
 }
-{ callPackage }:
+{ callPackage, fetchFromGitHub, lib }:
 let
-  repo = callPackage ./repo.nix { };
+  repo-data = lib.importJSON ./repo.json;
+  repo = fetchFromGitHub (builtins.removeAttrs repo-data [ "branch" ]);
   PaymentServer = (import "${repo}/nix").PaymentServer;
 in
 PaymentServer.components.exes."PaymentServer-exe"
{
"owner": "PrivateStorageio",
"repo": "PaymentServer",
"rev": "8f5c3ede67c767545ba9527fc208b43603ce2ea9",
"branch": "update-deps-2025-02",
"outputHashAlgo": "sha512",
"outputHash": "3mfyj1q83ivcqa1imwiwyy1222xr03i08kmfs0wdya3ji9brmz8kagg6rwc8fc62ir9n4a61ma052lbwinq1b7khpww509dz0xkxz8g"
}
\ No newline at end of file
{ fetchFromGitHub }:
fetchFromGitHub {
owner = "PrivateStorageio";
repo = "PaymentServer";
rev = "ff30e85c231a3b5ad76426bbf8801f8f76884367";
sha256 = "1spz19f5z96shmfpazj0rv6877xvchf3gl49a4xahjbbsz39x34x";
}
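For context, the new default.nix above consumes the JSON pin directly: `lib.importJSON` reads repo.json, and `builtins.removeAttrs repo-data [ "branch" ]` strips the "branch" key before handing the rest to `fetchFromGitHub`, since the fetcher has no such argument. A rough Python rendering of that mapping (illustrative only; the function name is made up):

import json

def github_fetch_args(pin_path):
    # Load the pin file and drop keys fetchFromGitHub does not understand,
    # mirroring `builtins.removeAttrs repo-data [ "branch" ]` above.
    with open(pin_path) as f:
        pin = json.load(f)
    return {k: v for k, v in pin.items() if k != "branch"}

# e.g. github_fetch_args("repo.json") keeps owner, repo, rev, and the output
# hash fields while discarding the informational "branch" entry.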
 # The overall system test suite for PrivateStorageio NixOS configuration.
+{ pkgs }:
 let
-  pkgs = import ../nixpkgs-2105.nix { };
+  # Add the custom packages as an attribute so they only need to be evaluated
+  # once.  See the comment in `morph/lib/default.nix` for details.
+  pkgs' = pkgs.extend (self: super: { ourpkgs = self.callPackage ./pkgs {}; });
 in {
-  private-storage = pkgs.nixosTest ./tests/private-storage.nix;
-  spending = pkgs.nixosTest ./tests/spending.nix;
-  tahoe = pkgs.nixosTest ./tests/tahoe.nix;
+  private-storage = pkgs'.nixosTest ./tests/private-storage.nix;
+
+  # The spending service is not deployed, so it doesn't seem *necessary* to
+  # run its test suite here.  The packaging still uses mach-nix, which is
+  # incompatible with NixOS 22.11, so we can't actually load the ZKAP
+  # spending service derivation anymore.  So ... disable the test suite.
+  #
+  # spending = pkgs'.nixosTest ./tests/spending.nix;
+  tahoe = pkgs'.nixosTest ./tests/tahoe.nix;
 }
@@ -47,7 +47,12 @@ def block_until_connected(api_root):
         in servers
         if server["connection_status"].startswith("Connected to ")
     )
-    if len(connected) >= 1:
+    # There is a read-only server and a read-write server!  The easiest
+    # way to be sure we've connected to the read-write server is to wait
+    # until we're connected to both.  Also, if we manage to connect to two
+    # servers this gives us some confidence that both the read-only and
+    # read-write servers are running.
+    if len(connected) >= 2:
         print(
             "Connected to a server:\n"
             "\t{nodeid}\n"
@@ -85,10 +90,13 @@ def get_api_root(path):
     return hyperlink.URL.from_text(f.read().strip())

 def tahoe_put(api_root, data, **kwargs):
+    uri = api_root.child(u"uri").to_uri()
     response = requests.put(
-        api_root.child(u"uri").to_uri(),
+        uri,
         BytesIO(data),
+        headers={"accept": "text/plain"},
     )
+    print(f"PUT {uri} responded:\n{response.text}\n")
     response.raise_for_status()
     return response.text
...
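The tahoe_put change above talks to the Tahoe-LAFS web API: a PUT to the node's /uri endpoint uploads the body, and the response body is the capability string for the stored data, which the new print call now logs before the status check. A minimal standalone sketch of the same call, assuming a node listening on 127.0.0.1:3456 as in the rest of this test suite:

from io import BytesIO

import hyperlink
import requests

def tahoe_put(api_root, data):
    # PUT bytes to the node's /uri endpoint; the response body is the
    # capability string for the uploaded data.
    uri = api_root.child(u"uri").to_uri()
    response = requests.put(uri, BytesIO(data), headers={"accept": "text/plain"})
    print(f"PUT {uri} responded:\n{response.text}\n")
    response.raise_for_status()
    return response.text

cap = tahoe_put(hyperlink.URL.from_text(u"http://127.0.0.1:3456"), b"hello world")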
@@ -12,21 +12,29 @@ from json import dumps
 from time import sleep

 def main():
-    if len(argv) != 5:
+    if len(argv) == 4:
+        # If no issuer is given then we just won't make the charge request.
+        # This is useful for following the webhook-based workflow.
+        clientAPIRoot, clientAPITokenPath, voucher = argv[1:]
+        issuerAPIRoot = None
+    elif len(argv) == 5:
+        clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher = argv[1:]
+    else:
         raise SystemExit(
-            "usage: %s <client api root> <client api token path> <issuer api root> <voucher>",
+            "usage: %s <client api root> <client api token path> [<issuer api root>] <voucher>",
         )
-    clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher = argv[1:]
     if not clientAPIRoot.endswith("/"):
         clientAPIRoot += "/"
-    if not issuerAPIRoot.endswith("/"):
+    if issuerAPIRoot is not None and not issuerAPIRoot.endswith("/"):
         issuerAPIRoot += "/"

-    zkapauthz = clientAPIRoot + "storage-plugins/privatestorageio-zkapauthz-v1"
+    zkapauthz = clientAPIRoot + "storage-plugins/privatestorageio-zkapauthz-v2"
     with open(clientAPITokenPath) as p:
         clientAPIToken = p.read().strip()

-    # Submit a charge to the issuer (which is also the PaymentServer).
-    charge_response = post(
-        issuerAPIRoot + "v1/stripe/charge",
+    if issuerAPIRoot is not None:
+        # Submit a charge to the issuer (which is also the PaymentServer).
+        charge_response = post(
+            issuerAPIRoot + "v1/stripe/charge",
...
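The new argument handling gives get-passes.py two entry points: with an issuer API root it still POSTs a charge before redeeming, and without one it assumes payment already arrived via the Stripe webhook. A small self-contained sketch of just the dispatch, with placeholder paths and URLs:

def parse_args(argv):
    # Mirrors the dispatch above: three arguments mean "webhook workflow,
    # skip the charge request"; four arguments name the issuer explicitly.
    if len(argv) == 4:
        clientAPIRoot, clientAPITokenPath, voucher = argv[1:]
        issuerAPIRoot = None
    elif len(argv) == 5:
        clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher = argv[1:]
    else:
        raise SystemExit(
            "usage: %s <client api root> <client api token path> [<issuer api root>] <voucher>" % argv[0],
        )
    return clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher

# Webhook-based workflow -- no issuer root, so no charge request is made:
print(parse_args(["get-passes.py", "http://127.0.0.1:3456", "/tmp/token", "xy" * 22]))
# Charge-based workflow -- issuer root included:
print(parse_args(["get-passes.py", "http://127.0.0.1:3456", "/tmp/token", "http://issuer/", "xy" * 22]))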
-{ pkgs }:
+{ pkgs, ... }:
 let
+  ourpkgs = pkgs.callPackage ../pkgs { };
+
-  sshPrivateKey = ./probeuser_ed25519;
-  sshPublicKey = ./probeuser_ed25519.pub;
+  sshPrivateKeyFile = ./probeuser_ed25519;
+  sshPublicKeyFile = ./probeuser_ed25519.pub;
   sshUsers = {
-    root = (builtins.readFile sshPublicKey);
-    probeuser = (builtins.readFile sshPublicKey);
+    root = [(builtins.readFile sshPublicKeyFile)];
+    probeuser = [(builtins.readFile sshPublicKeyFile)];
   };
-  # Generate a command which can be used with runOnNode to ssh to the given
-  # host.
-  ssh = username: hostname: [
-    "cp" sshPrivateKey "/tmp/ssh_key" ";"
-    "chmod" "0400" "/tmp/ssh_key" ";"
-    "ssh" "-oStrictHostKeyChecking=no" "-i" "/tmp/ssh_key" "${username}@${hostname}" ":"
-  ];
-
-  # Separate helper programs so we can write as little python inside a string
-  # inside a nix expression as possible.
-  run-introducer = ./run-introducer.py;
-  run-client = ./run-client.py;
-  get-passes = ./get-passes.py;
-  exercise-storage = ./exercise-storage.py;

   # This is a test double of the Stripe API server.  It is extremely simple.
   # It barely knows how to respond to exactly the API endpoints we use,
@@ -30,6 +19,7 @@ let
   issuerURL = "http://issuer/";
   voucher = "xyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxy";
+  tokenCount = 1000;

   # The issuer's signing key.  Notionally, this is a secret key.  This is only
   # the value for this system test though so I don't care if it leaks to the
@@ -40,6 +30,7 @@ let
       basename = "signing-key.private";
     in
       pkgs.writeText basename key;
+  ristrettoPublicKey = "xoNHEqAi+kC5EWfqN+kuDINhjQTwGrSQyshHvGFpoys=";

   stripeSecretKeyPath =
     let
@@ -49,6 +40,9 @@ let
     in
       pkgs.writeText basename key;

+  stripeWebhookSecretKey = "whsec_e302402f2f4b5d8241fe494cd693464345bf28c4d7312516d6c1ce69cd0c1e1d";
+  stripeWebhookSecretKeyPath = pkgs.writeText "stripe-webhook.secret" stripeWebhookSecretKey;
+
   # Here are the preconstructed secrets which we can assign to the introducer.
   # This is a lot easier than having the introducer generate them and then
   # discovering and configuring the other nodes with them.
@@ -71,19 +65,9 @@ let
     networking.firewall.enable = false;
     networking.dhcpcd.enable = false;
   };
-
-  # Return a python program fragment to run a shell command on one of the nodes.
-  # The first argument is the name of the node.  The second is a list of the
-  # argv to run.
-  #
-  # The program's output is piped to systemd-cat and the python fragment
-  # evaluates to success if the command exits with a success status.
-  runOnNode = node: argv:
-    let
-      command = builtins.concatStringsSep " " argv;
-    in
-      "${node}.succeed('set -eo pipefail; ${command} | systemd-cat')";
 in {
+  name = "private-storage";
   # https://nixos.org/nixos/manual/index.html#sec-nixos-tests
   # https://nixos.mayflower.consulting/blog/2019/07/11/leveraging-nixos-tests-in-your-project/
   nodes = rec {
@@ -139,10 +123,11 @@ in {
             tls = false;
             issuer = "Ristretto";
             inherit ristrettoSigningKeyPath;
+            tokensPerVoucher = tokenCount;
             letsEncryptAdminEmail = "user@example.invalid";
             allowedChargeOrigins = [ "http://unused.invalid" ];
-            inherit stripeSecretKeyPath;
+            inherit stripeSecretKeyPath stripeWebhookSecretKeyPath;
             stripeEndpointDomain = "api_stripe_com";
             stripeEndpointScheme = "HTTP";
             stripeEndpointPort = 80;
@@ -176,134 +161,15 @@ in {
   };
   # Test the machines with a Python program.
-  testScript = ''
-    # Boot the VMs.  We used to do them all in parallel but the boot
-    # sequence got flaky at some point for some reason I don't
-    # understand. :/  It might be related to this:
-    #
-    # https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
-    #
-    # See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
-    # that constructs the QEMU command that gets run.
-    #
-    # Boot them one at a time for now.
-    issuer.connect()
-    introducer.connect()
-    storage.connect()
-    client.connect()
-    api_stripe_com.connect()
-
-    # The issuer and the storage server should accept SSH connections.  This
-    # doesn't prove it is so but if it fails it's a pretty good indication
-    # it isn't so.
-    storage.wait_for_open_port(22)
-    ${runOnNode "issuer" (ssh "probeuser" "storage")}
-    ${runOnNode "issuer" (ssh "root" "storage")}
-    issuer.wait_for_open_port(22)
-    ${runOnNode "storage" (ssh "probeuser" "issuer")}
-    ${runOnNode "storage" (ssh "root" "issuer")}
-
-    # Set up a Tahoe-LAFS introducer.
-    introducer.copy_from_host('${pemFile}', '/tmp/node.pem')
-    try:
-        ${runOnNode "introducer" [ run-introducer "/tmp/node.pem" (toString introducerPort) introducerFURL ]}
-    except:
-        code, log = introducer.execute('cat /tmp/stdout /tmp/stderr')
-        introducer.log(log)
-        raise
-
-    #
-    # Get a Tahoe-LAFS storage server up.
-    #
-    code, version = storage.execute('tahoe --version')
-    storage.log(version)
-
-    # The systemd unit should reach the running state.
-    storage.wait_for_unit('tahoe.storage.service')
-
-    # Some while after that the Tahoe-LAFS node should listen on the web API
-    # port.  The port number here has to agree with the port number set in
-    # the private-storage.nix module.
-    storage.wait_for_open_port(3456)
-
-    # Once the web API is listening it should be possible to scrape some
-    # status from the node if it is really working.
-    storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
-
-    # It should have Eliot logging turned on as well.
-    storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
-
-    #
-    # Storage appears to be working so try to get a client to speak with it.
-    #
-    ${runOnNode "client" [ run-client "/tmp/client" introducerFURL issuerURL ]}
-    client.wait_for_open_port(3456)
-
-    # Make sure the fake Stripe API server is ready for requests.
-    try:
-        api_stripe_com.wait_for_unit("api.stripe.com")
-    except:
-        code, log = api_stripe_com.execute('journalctl -u api.stripe.com')
-        api_stripe_com.log(log)
-        raise
-
-    # Get some ZKAPs from the issuer.
-    try:
-        ${runOnNode "client" [
-          get-passes
-          "http://127.0.0.1:3456"
-          "/tmp/client/private/api_auth_token"
-          issuerURL
-          voucher
-        ]}
-    except:
-        code, log = client.execute('cat /tmp/stdout /tmp/stderr');
-        client.log(log)
-
-        # Dump the fake Stripe API server logs, too, since the error may arise
-        # from a PaymentServer/Stripe interaction.
-        code, log = api_stripe_com.execute('journalctl -u api.stripe.com')
-        api_stripe_com.log(log)
-        raise
-
-    # The client should be prepped now.  Make it try to use some storage.
-    try:
-        ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
-    except:
-        code, log = client.execute('cat /tmp/stdout /tmp/stderr')
-        client.log(log)
-        raise
-
-    # It should be possible to restart the storage service without the
-    # storage node fURL changing.
-    try:
-        furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl'
-        before = storage.execute('cat ' + furlfile)
-        ${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
-        after = storage.execute('cat ' + furlfile)
-        if (before != after):
-            raise Exception('fURL changes after storage node restart')
-    except:
-        code, log = storage.execute('cat /tmp/stdout /tmp/stderr')
-        storage.log(log)
-        raise
-
-    # The client should actually still work, too.
-    try:
-        ${runOnNode "client" [ exercise-storage "/tmp/client" ]}
-    except:
-        code, log = client.execute('cat /tmp/stdout /tmp/stderr')
-        client.log(log)
-        raise
-
-    # The issuer metrics should be accessible from the monitoring network.
-    issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
-    issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
-    issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')
-
-    # The issuer metrics should NOT be accessible from any other network.
-    issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
-    client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
-
-    issuer.execute('ifconfig lo:fauxvpn down')
-  '';
+  testScript = ourpkgs.lib.testing.makeTestScript {
+    testpath = ./test_privatestorage.py;
+    kwargs = {
+      inherit sshPrivateKeyFile pemFile introducerPort introducerFURL issuerURL ristrettoPublicKey voucher tokenCount stripeWebhookSecretKey;
+      # Supply some helper programs to help the tests stay a bit higher level.
+      run_introducer = ./run-introducer.py;
+      run_client = ./run-client.py;
+      get_passes = ./get-passes.py;
+      exercise_storage = ./exercise-storage.py;
+    };
+  };
 }
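`makeTestScript` itself is not part of this diff (it lives under the new `ourpkgs` tree), so its exact behavior is an assumption here: it presumably reads the module at `testpath` and appends a call forwarding `kwargs` to the module's `test()` function, with the store paths of the helper scripts substituted in. A hypothetical sketch of that idea:

def make_test_script(testpath, kwargs):
    # Hypothetical reconstruction, not the real implementation: concatenate
    # the test module's source with a trailing test(**kwargs) call so the
    # NixOS test driver runs it with the values from the Nix expression.
    with open(testpath) as f:
        source = f.read()
    return source + "\n\ntest(**{!r})\n".format(kwargs)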
@@ -12,7 +12,7 @@ from subprocess import check_output
 from configparser import ConfigParser

 def main():
-    (nodePath, introducerFURL, issuerURL) = argv[1:]
+    (nodePath, introducerFURL, issuerURL, publicKey, tokenCount) = argv[1:]

     run(["tahoe", "--version"])
     run([
@@ -29,14 +29,12 @@ def main():
     with open("/tmp/client/tahoe.cfg") as cfg:
         config.read_file(cfg)

-    config.set(u"client", u"storage.plugins", u"privatestorageio-zkapauthz-v1")
-    config.add_section(u"storageclient.plugins.privatestorageio-zkapauthz-v1")
-    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v1", u"redeemer", u"ristretto")
-    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v1", u"ristretto-issuer-root-url", issuerURL)
-    # This has to agree with the PaymentServer configuration at the configured
-    # issuer location.  Presently PaymentServer has 50000 hard-coded as the
-    # correct value.
-    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v1", u"default-token-count", u"50000")
+    config.set(u"client", u"storage.plugins", u"privatestorageio-zkapauthz-v2")
+    config.add_section(u"storageclient.plugins.privatestorageio-zkapauthz-v2")
+    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"redeemer", u"ristretto")
+    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"ristretto-issuer-root-url", issuerURL)
+    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"allowed-public-keys", publicKey)
+    config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"default-token-count", tokenCount)

     with open("/tmp/client/tahoe.cfg", "wt") as cfg:
         config.write(cfg)
@@ -45,7 +43,7 @@ def main():
         "daemonize",
         "-o", "/tmp/stdout",
         "-e", "/tmp/stderr",
-        which("tahoe"), "run", "/tmp/client",
+        which("tahoe"), "run", "--allow-stdin-close", "/tmp/client",
     ])

 def run(argv):
...
@@ -31,11 +31,11 @@ def main():
         "daemonize",
         "-o", "/tmp/stdout",
         "-e", "/tmp/stderr",
-        which("tahoe"), "run", "/tmp/introducer",
+        which("tahoe"), "run", "--allow-stdin-close", "/tmp/introducer",
     ])

     retry(
-        "waiting for open introducer port",
+        f"connect to introducer (port {introducerPort})",
         lambda: checkOpen(int(introducerPort)),
     )
...
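`retry` and `checkOpen` are defined elsewhere in run-introducer.py and are untouched by this change; only the failure message passed to `retry` is reworded. For reference, a plausible minimal shape for those helpers (an assumption, not the file's actual code):

import socket
from time import sleep

def checkOpen(port):
    # True once something on localhost accepts connections on the port.
    with socket.socket() as s:
        return s.connect_ex(("127.0.0.1", port)) == 0

def retry(label, check, attempts=60, delay=1.0):
    # Poll `check` until it succeeds, failing with the descriptive label
    # (now "connect to introducer (port ...)") after the attempts run out.
    for _ in range(attempts):
        if check():
            return
        sleep(delay)
    raise RuntimeError("failed to " + label)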
@@ -11,10 +11,14 @@
       services.private-storage-spending.enable = true;
       services.private-storage-spending.domain = "localhost";
     };
+    external = { ... }: {
+      # A node that has no particular configuration, for testing access rules
+      # for external hosts.
+    };
   };

   testScript = { nodes }: let
     revision = nodes.spending.config.passthru.ourpkgs.zkap-spending-service.meta.rev;
-    curl = "${pkgs.curl}/bin/curl -sSf";
+    curl = "${pkgs.curl}/bin/curl -sSf --max-time 5";
   in
   ''
     import json
@@ -25,8 +29,17 @@
     with subtest("Ensure we can ping the spending service"):
         output = spending.succeed("${curl} http://localhost/v1/_ping")
         assert json.loads(output)["status"] == "ok", "Could not ping spending service."

+    with subtest("Ensure external hosts can ping the spending service"):
+        output = external.succeed("${curl} http://spending/v1/_ping")
+        assert json.loads(output)["status"] == "ok", "Could not ping spending service."
+
     with subtest("Ensure that the spending service version matches the expected version"):
         output = spending.succeed("${curl} http://localhost/v1/_version")
         assert json.loads(output)["revision"] == "${revision}", "Spending service revision does not match."
+
+    with subtest("Ensure that the spending service generates metrics"):
+        # TODO: We should pass "-H 'accept: application/openmetrics-text'" here.
+        # See https://github.com/prometheus/prometheus/issues/8932
+        output = spending.succeed("${curl} http://localhost/metrics | ${pkgs.prometheus}/bin/promtool check metrics")
+
+    with subtest("Ensure that the metrics are not accessible from other machines"):
+        output = external.fail("${curl} http://spending/metrics")
   '';
 }
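The TODO in the metrics subtest is about content negotiation: promtool is checking the default Prometheus text format, while the linked issue concerns the stricter OpenMetrics exposition format. The request that comment has in mind would look roughly like this (a hypothetical sketch; the endpoint is the one the test already exercises):

import requests

# Ask the spending service for the OpenMetrics exposition format instead of
# the default Prometheus text format.
response = requests.get(
    "http://spending/metrics",
    headers={"accept": "application/openmetrics-text"},
    timeout=5,
)
response.raise_for_status()
print(response.text)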
-{ ... }:
+{ pkgs, ... }:
+let
+  ourpkgs = pkgs.callPackage ../pkgs { };
+in
 {
+  name = "tahoe";
   nodes = {
     storage = { config, pkgs, ourpkgs, ... }: {
       imports = [
@@ -23,50 +27,7 @@
       };
     };
   };
-  testScript = ''
-    start_all()
-
-    # After the service starts, destroy the "created" marker to force it to
-    # re-create its internal state.
-    storage.wait_for_open_port(4001)
-    storage.succeed("systemctl stop tahoe.storage")
-    storage.succeed("rm /var/db/tahoe-lafs/storage.created")
-    storage.succeed("systemctl start tahoe.storage")
-
-    # After it starts up again, verify it has consistent internal state and a
-    # backup of the prior state.
-    storage.wait_for_open_port(4001)
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]")
-    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]")
-
-    # Stop it again, once again destroy the "created" marker, and this time also
-    # jam some partial state in the way that will need cleanup.
-    storage.succeed("systemctl stop tahoe.storage")
-    storage.succeed("rm /var/db/tahoe-lafs/storage.created")
-    storage.succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial")
-    try:
-        storage.succeed("systemctl start tahoe.storage")
-    except:
-        x, y = storage.execute("journalctl -u tahoe.storage")
-        storage.log(y)
-        raise
-
-    # After it starts up again, verify it has consistent internal state and
-    # backups of the prior two states.  It also has no copy of the inconsistent
-    # state because it could never have been used.
-    storage.wait_for_open_port(4001)
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2 ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]")
-    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]")
-    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]")
-    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]")
-    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]")
-  '';
+  testScript = ourpkgs.lib.testing.makeTestScript {
+    testpath = ./test_tahoe.py;
+  };
 }
import hmac
from shlex import quote
from time import time
def runOnNode(node, argvs):
    """
    Run shell commands on one of the test nodes.  The first argument is the
    node object.  The second is a list of argv lists to run in sequence.

    Each command's output is piped to systemd-cat.  If a command exits with a
    failure status, the node's /tmp/stdout and /tmp/stderr are logged before
    the failure is re-raised.
    """
    for argv in argvs:
        try:
            node.succeed('set -eo pipefail; {} | systemd-cat'.format(" ".join(map(quote, argv))))
        except Exception:
            code, output = node.execute('cat /tmp/stdout /tmp/stderr')
            node.log(output)
            raise
def ssh(username, sshPrivateKeyFile, hostname):
"""
Generate a command which can be used with runOnNode to ssh to the given
host.
"""
return [
["cp", sshPrivateKeyFile, "/tmp/ssh_key"],
["chmod", "0400", "/tmp/ssh_key"],
["ssh", "-oStrictHostKeyChecking=no", "-i", "/tmp/ssh_key",
"{username}@{hostname}".format(username=username, hostname=hostname), ":"],
]
def checkout_session_completed(voucher: str) -> str:
"""
Return a request body string which represents the payment completed event
for the given voucher.
"""
return """\
{
"id": "evt_1LxcsdBHXBAMm9bPSq6UWAZe",
"object": "event",
"api_version": "2019-11-05",
"created": 1666903247,
"data": {
"object": {
"id": "cs_test_a1kWLWGoXZPa6ywyVnuib8DPA3BqXCWZX5UEjLfKh7gLjdZy2LD3F5mEp3",
"object": "checkout.session",
"after_expiration": null,
"allow_promotion_codes": null,
"amount_subtotal": 3000,
"amount_total": 3000,
"automatic_tax": {
"enabled": false,
"status": null
},
"billing_address_collection": null,
"cancel_url": "https://httpbin.org/post",
"client_reference_id": "%(voucher)s",
"consent": null,
"consent_collection": null,
"created": 1666903243,
"currency": "usd",
"customer": "cus_Mh0u62xtelUehD",
"customer_creation": "always",
"customer_details": {
"address": {
"city": null,
"country": null,
"line1": null,
"line2": null,
"postal_code": null,
"state": null
},
"email": "stripe@example.com",
"name": null,
"phone": null,
"tax_exempt": "none",
"tax_ids": [
]
},
"customer_email": null,
"display_items": [
{
"amount": 1500,
"currency": "usd",
"custom": {
"description": "comfortable cotton t-shirt",
"images": null,
"name": "t-shirt"
},
"quantity": 2,
"type": "custom"
}
],
"expires_at": 1666989643,
"livemode": false,
"locale": null,
"metadata": {
},
"mode": "payment",
"payment_intent": "pi_3LxcsZBHXBAMm9bP1daBGoPV",
"payment_link": null,
"payment_method_collection": "always",
"payment_method_options": {
},
"payment_method_types": [
"card"
],
"payment_status": "paid",
"phone_number_collection": {
"enabled": false
},
"recovered_from": null,
"setup_intent": null,
"shipping": null,
"shipping_address_collection": null,
"shipping_options": [
],
"shipping_rate": null,
"status": "complete",
"submit_type": null,
"subscription": null,
"success_url": "https://httpbin.org/post",
"total_details": {
"amount_discount": 0,
"amount_shipping": 0,
"amount_tax": 0
},
"url": null
}
},
"livemode": false,
"pending_webhooks": 2,
"request": {
"id": null,
"idempotency_key": null
},
"type": "checkout.session.completed"
}
""" % dict(voucher=voucher)
def stripe_signature(key: str, body: str) -> str:
"""
Construct a valid value for the ``Stripe-Signature`` header item.
"""
timestamp = int(time())
v1 = hmac.new(key.encode("utf-8"), f"{timestamp}.{body}".encode("utf-8"), "sha256").hexdigest()
return f"t={timestamp},v1={v1}"
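# Illustration only (not part of this change): a verifier for signatures in
# the form produced above would parse `t` and `v1` back out of the header,
# recompute the HMAC over "{t}.{body}", and compare digests in constant time.
def verify_stripe_signature(key: str, body: str, header: str) -> bool:
    parts = dict(item.split("=", 1) for item in header.split(","))
    expected = hmac.new(
        key.encode("utf-8"),
        "{}.{}".format(parts["t"], body).encode("utf-8"),
        "sha256",
    ).hexdigest()
    return hmac.compare_digest(expected, parts["v1"])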
def pay_for_voucher(url: str, webhook_secret, voucher: str) -> list[str]:
"""
Return a command to run to report to the issuer that payment for the given
voucher has been received.
"""
body = checkout_session_completed(voucher)
return [
"curl",
"-X", "POST",
"--header", "content-type: application/json; charset=utf-8",
"--header", f"stripe-signature: {stripe_signature(webhook_secret, body)}",
"--data-binary", body,
url + "v1/stripe/webhook",
]
def test(
sshPrivateKeyFile,
pemFile,
run_introducer,
run_client,
get_passes,
exercise_storage,
introducerPort,
introducerFURL,
issuerURL,
ristrettoPublicKey,
stripeWebhookSecretKey,
voucher,
tokenCount,
):
"""
"""
# Boot the VMs. We used to do them all in parallel but the boot
# sequence got flaky at some point for some reason I don't
# understand. :/ It might be related to this:
#
# https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
#
# See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
# that constructs the QEMU command that gets run.
#
# Boot them one at a time for now.
issuer.connect()
introducer.connect()
storage.connect()
client.connect()
api_stripe_com.connect()
# The issuer and the storage server should accept SSH connections. This
# doesn't prove it is so but if it fails it's a pretty good indication
# it isn't so.
storage.wait_for_open_port(22)
runOnNode(issuer, ssh("probeuser", sshPrivateKeyFile, "storage"))
runOnNode(issuer, ssh("root", sshPrivateKeyFile, "storage"))
issuer.wait_for_open_port(22)
runOnNode(storage, ssh("probeuser", sshPrivateKeyFile, "issuer"))
runOnNode(storage, ssh("root", sshPrivateKeyFile, "issuer"))
# Set up a Tahoe-LAFS introducer.
introducer.copy_from_host(pemFile, '/tmp/node.pem')
runOnNode(introducer, [[run_introducer, "/tmp/node.pem", str(introducerPort), introducerFURL]])
#
# Get a Tahoe-LAFS storage server up.
#
code, version = storage.execute('tahoe --version')
storage.log(version)
# The systemd unit should reach the running state.
storage.wait_for_unit('tahoe.storage.service')
# Some while after that the Tahoe-LAFS node should listen on the web API
# port. The port number here has to agree with the port number set in
# the private-storage.nix module.
storage.wait_for_open_port(3456)
# Once the web API is listening it should be possible to scrape some
# status from the node if it is really working.
storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
# It should have Eliot logging turned on as well.
storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
# Make sure the issuer is ready to accept connections.
issuer.wait_for_open_port(80)
# Pretend to be Stripe and report that our voucher has been paid for.
runOnNode(issuer, [pay_for_voucher("http://localhost/", stripeWebhookSecretKey, voucher)])
#
# Storage appears to be working so try to get a client to speak with it.
#
runOnNode(client, [[run_client, "/tmp/client", introducerFURL, issuerURL, ristrettoPublicKey, str(tokenCount)]])
client.wait_for_open_port(3456)
# Make sure the fake Stripe API server is ready for requests.
try:
api_stripe_com.wait_for_open_port(80)
except:
code, output = api_stripe_com.execute('journalctl -u api.stripe.com')
api_stripe_com.log(output)
raise
# Get some ZKAPs from the issuer.
try:
runOnNode(client, [[
get_passes,
"http://127.0.0.1:3456",
"/tmp/client/private/api_auth_token",
voucher,
]])
except:
# Dump the fake Stripe API server logs, too, since the error may arise
# from a PaymentServer/Stripe interaction.
for node, unit in [(api_stripe_com, "api.stripe.com"), (issuer, "zkapissuer")]:
code, output = node.execute(f'journalctl -u {unit}')
node.log(output)
raise
# The client should be prepped now. Make it try to use some storage.
runOnNode(client, [[exercise_storage, "/tmp/client"]])
# It should be possible to restart the storage service without the
# storage node fURL changing.
furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v2.furl'
before = storage.execute('cat ' + furlfile)
runOnNode(storage, [["systemctl", "restart", "tahoe.storage"]])
after = storage.execute('cat ' + furlfile)
if (before != after):
raise Exception('fURL changes after storage node restart')
# The client should actually still work, too.
runOnNode(client, [[exercise_storage, "/tmp/client"]])
# The issuer metrics should be accessible from the monitoring network.
issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')
# The issuer metrics should NOT be accessible from any other network.
issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
issuer.execute('ifconfig lo:fauxvpn down')
def test():
start_all()
# After the service starts, destroy the "created" marker to force it to
# re-create its internal state.
storage.wait_for_open_port(4001)
storage.succeed("systemctl stop tahoe.storage")
storage.succeed("rm /var/db/tahoe-lafs/storage.created")
storage.succeed("systemctl start tahoe.storage")
# After it starts up again, verify it has consistent internal state and a
# backup of the prior state.
storage.wait_for_open_port(4001)
storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]")
storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]")
# Stop it again, once again destroy the "created" marker, and this time also
# jam some partial state in the way that will need cleanup.
storage.succeed("systemctl stop tahoe.storage")
storage.succeed("rm /var/db/tahoe-lafs/storage.created")
storage.succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial")
try:
storage.succeed("systemctl start tahoe.storage")
except:
x, y = storage.execute("journalctl -u tahoe.storage")
storage.log(y)
raise
# After it starts up again, verify it has consistent internal state and
# backups of the prior two states. It also has no copy of the inconsistent
# state because it could never have been used.
storage.wait_for_open_port(4001)
storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.2 ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]")
storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]")
storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]")
storage.succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]")
storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]")
 # The overall unit test suite for PrivateStorageio NixOS configuration.
+{ pkgs }:
 let
-  pkgs = import <nixpkgs> { };
   # Total the numbers in a list.
   sum = builtins.foldl' (a: b: a + b) 0;
...
{
"name": "release2105",
"url": "https://releases.nixos.org/nixos/21.05/nixos-21.05.3367.fd8a7fd07da/nixexprs.tar.xz",
"sha256": "12p7v805xj5as2fbdh30i0b9iwy8y24sk256rgqfqylxj1784mn8"
}
import (builtins.fetchTarball (builtins.fromJSON (builtins.readFile ./nixpkgs-2105.json)))
{ "name": "nixpkgs"
, "url": "https://github.com/PrivateStorageio/nixpkgs/archive/5ebd5af2d5c6caf23735c8c0e6bc27357fa8d2a8.tar.gz"
, "sha256": "1g2bvs8prqjskzv8s1qmh36k7rmj98jib0syqbrq02xxzw5dpqb4"
}
{
"name": "source",
"url": "https://releases.nixos.org/nixos/25.05/nixos-25.05.804002.5f4f306bea96/nixexprs.tar.xz",
"sha256": "1mawn3x0wyds5pqfbckz8kh0yg5ss4hpnz5p9nm9yj8y475gfvzr"
}
\ No newline at end of file