Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • privatestorage/PrivateStorageio
  • tomprince/PrivateStorageio
2 results
Show changes
Showing
with 373 additions and 27 deletions
# Build the LeaseReport executable from the GitLab revision pinned in
# ./repo.json.  `callPackage` is accepted but unused so that existing
# callPackage-style call sites keep working unchanged.
{ callPackage, fetchFromGitLab, lib }:
let
  # Pin metadata (owner, repo, rev, hash, ...).  The "branch" key is
  # informational only and is not a fetchFromGitLab argument, so it is
  # stripped before fetching.
  pin = lib.importJSON ./repo.json;
  src = fetchFromGitLab (builtins.removeAttrs pin [ "branch" ]);
in
(import "${src}/nix").LeaseReport.components.exes.LeaseReport
{
"owner": "privatestorage",
"repo": "LeaseReport",
"branch": "main",
"domain": "whetstone.private.storage",
"rev": "25174533c782f5e5f17aa1fa4d29e2adbdf96a08",
"outputHashAlgo": "sha512",
"outputHash": "0h3yzmcizxkz2dl54b8xzbkdb1bvnqiyp8xrhjgzi59y3iq3ggss9i5cy2mbmy467ri8llnax2p2paykv29lw6c8d8zihw1qq5gv46v"
}
\ No newline at end of file
# Package the megacli2prom script from the GitHub revision pinned in
# ./repo.json.
{ pkgs ? import <nixpkgs> {} }:
let
  # Pin metadata; "branch" is informational only and not accepted by
  # fetchFromGitHub, so drop it before fetching.
  pin = pkgs.lib.importJSON ./repo.json;
  source = pkgs.fetchFromGitHub (builtins.removeAttrs pin [ "branch" ]);
in
pkgs.stdenv.mkDerivation {
  name = "megacli2prom";
  buildInputs = [ pkgs.python3 pkgs.megacli ];
  src = source;
  # There is no build step; just install the script under $out/bin.
  # (Nix's '' strings strip the common leading indentation, so this is
  # the same script text as an unindented literal.)
  installPhase = ''
    mkdir -p $out/bin
    cp ./megacli2prom.py $out/bin/megacli2prom
    chmod +x $out/bin/megacli2prom
  '';
}
{
"owner": "PrivateStorageio",
"repo": "megacli2prom",
"branch": "main",
"rev": "2872bf3526c6074e21ddf9bd684355c928bf1626",
"outputHashAlgo": "sha512",
"outputHash": "1yvyz6lngsx7dv5nb89gb8akzj3hmiihjcg8ya3xcbdvv39qydd8l416k3b3w08pz87gwvxd44bhni5kljqdj60ixyhf1mcvmm0rffw"
}
\ No newline at end of file
# Build a Python environment with ZKAPAuthorizer installed, from the
# GitHub revision pinned in ./repo.json.  `callPackage` is accepted but
# unused so callPackage-style call sites keep working.
{ fetchFromGitHub, callPackage, lib }:
let
# Pin metadata (owner, repo, rev, hash).  The "branch" key is
# informational only and is not a fetchFromGitHub argument, so drop it.
repo-data = lib.importJSON ./repo.json;
repo = fetchFromGitHub (builtins.removeAttrs repo-data [ "branch" ]);
# Import the checkout's top-level expression; the attribute path below
# suggests it exposes flake-shaped outputs — TODO confirm against the
# ZKAPAuthorizer repository.
zk = import repo;
# XXX package version choice here -- this hard-codes the system
# (x86_64-linux), the Python version (3.9) and the Tahoe-LAFS variant,
# and must be revisited whenever any of those change upstream.
zkapauthorizer = zk.outputs.packages.x86_64-linux.zkapauthorizer-python39-tahoe_dev;
# Use the same interpreter the package was built against so the
# environment is consistent.
python = zkapauthorizer.passthru.python;
in
python.withPackages (ps: [ zkapauthorizer ] )
{
"owner": "PrivateStorageio",
"branch": "main",
"repo": "ZKAPAuthorizer",
"rev": "fb89e91a6c7f595cd0b1c7aa7055cbd32c482180",
"outputHashAlgo": "sha512",
"outputHash": "3f44znykq8f7mcgdwdyhgf2dvnx7yydmlrjcr17mxfwya4jqmx8zb59mxkxvar0ahn639y2nq3bcqxdyipljfxilfi1cz21li908kkw"
}
\ No newline at end of file
# Package the zkap-spending-service from the GitLab revision pinned in
# ./repo.json.
{ callPackage, fetchFromGitLab, lib }:
let
repo-data = lib.importJSON ./repo.json;
# "branch" is informational only; fetchFromGitLab does not accept it.
repo = fetchFromGitLab (builtins.removeAttrs repo-data [ "branch" ])
in
# We want to check the revision the service reports against the revision
# that we install. The upstream derivation doesn't currently know its own
# version, but we do have it here. Thus, we add it as a meta attribute
# to the derivation provided from upstream.
lib.addMetaAttrs { inherit (repo-data) rev; }
(callPackage repo {})
{
"owner": "privatestorage",
"repo": "zkap-spending-service",
"rev": "66fd395268b466d4c7bb0a740fb758a5acccd1c4",
"branch": "main",
"domain": "whetstone.private.storage",
"outputHash": "1nryvsccncrka25kzrwqkif4x68ib0cs2vbw1ngfmzw86gjgqx01a7acgspmrpfs62p4q8zw0f2ynl8jr3ygyypjrl8v7w8g49y0y0y",
"outputHashAlgo": "sha512"
}
# Build the PaymentServer executable from the GitHub revision pinned in
# ./repo.json.  `callPackage` is accepted but unused so callPackage-style
# call sites keep working.
{ callPackage, fetchFromGitHub, lib }:
let
  # Pin metadata; the "branch" key is informational only and is not a
  # fetchFromGitHub argument, so strip it before fetching.
  pin = lib.importJSON ./repo.json;
  source = fetchFromGitHub (builtins.removeAttrs pin [ "branch" ]);
in
(import "${source}/nix").PaymentServer.components.exes."PaymentServer-exe"
{
"owner": "PrivateStorageio",
"repo": "PaymentServer",
"rev": "8f5c3ede67c767545ba9527fc208b43603ce2ea9",
"branch": "update-deps-2025-02",
"outputHashAlgo": "sha512",
"outputHash": "3mfyj1q83ivcqa1imwiwyy1222xr03i08kmfs0wdya3ji9brmz8kagg6rwc8fc62ir9n4a61ma052lbwinq1b7khpww509dz0xkxz8g"
}
\ No newline at end of file
# The overall system test suite for PrivateStorageio NixOS configuration.
#
# NOTE(review): the compare view collapsed old and new revisions of this
# file into one span, leaving `pkgs` bound twice and `private-storage` /
# `tahoe` each defined twice -- invalid Nix (duplicate attribute error).
# This keeps the newer bindings, which use the extended package set.
{ pkgs }:
let
  # Add custom packages as an attribute, so they only need to be evaluated
  # once.  See the comment in `morph/lib/default.nix` for details.
  pkgs' = pkgs.extend (self: super: { ourpkgs = self.callPackage ./pkgs {}; });
in {
  private-storage = pkgs'.nixosTest ./tests/private-storage.nix;
  # The spending service is not deployed so it doesn't seem *necessary* to run
  # its test suite here. The packaging still uses mach-nix which is
  # incompatible with NixOS 22.11 so we can't actually load the ZKAP spending
  # service derivation anymore. So ... disable the test suite.
  #
  # spending = pkgs'.nixosTest ./tests/spending.nix;
  tahoe = pkgs'.nixosTest ./tests/tahoe.nix;
}
......@@ -47,7 +47,12 @@ def block_until_connected(api_root):
in servers
if server["connection_status"].startswith("Connected to ")
)
if len(connected) >= 1:
# There is a read-only server and a read-write server! The easiest
# way to be sure we've connected to the read-write server is to wait
# until we're connected to both. Also, if we manage to connect to two
# servers this gives us some confidence that both the read-only and
# read-write servers are running.
if len(connected) >= 2:
print(
"Connected to a server:\n"
"\t{nodeid}\n"
......@@ -85,10 +90,13 @@ def get_api_root(path):
return hyperlink.URL.from_text(f.read().strip())
def tahoe_put(api_root, data, **kwargs):
uri = api_root.child(u"uri").to_uri()
response = requests.put(
api_root.child(u"uri").to_uri(),
uri,
BytesIO(data),
headers={"accept": "text/plain"},
)
print(f"PUT {uri} responded:\n{response.text}\n")
response.raise_for_status()
return response.text
......
......@@ -12,30 +12,38 @@ from json import dumps
from time import sleep
def main():
if len(argv) != 5:
if len(argv) == 4:
# If no issuer is given then we just won't make the charge request.
# This is useful for following the webhook-based workflow.
clientAPIRoot, clientAPITokenPath, voucher = argv[1:]
issuerAPIRoot = None
elif len(argv) == 5:
clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher = argv[1:]
else:
raise SystemExit(
"usage: %s <client api root> <client api token path> <issuer api root> <voucher>",
"usage: %s <client api root> <client api token path> [<issuer api root>] <voucher>",
)
clientAPIRoot, clientAPITokenPath, issuerAPIRoot, voucher = argv[1:]
if not clientAPIRoot.endswith("/"):
clientAPIRoot += "/"
if not issuerAPIRoot.endswith("/"):
if issuerAPIRoot is not None and not issuerAPIRoot.endswith("/"):
issuerAPIRoot += "/"
zkapauthz = clientAPIRoot + "storage-plugins/privatestorageio-zkapauthz-v1"
zkapauthz = clientAPIRoot + "storage-plugins/privatestorageio-zkapauthz-v2"
with open(clientAPITokenPath) as p:
clientAPIToken = p.read().strip()
# Submit a charge to the issuer (which is also the PaymentServer).
charge_response = post(
issuerAPIRoot + "v1/stripe/charge",
dumps(charge_json(voucher)),
headers={
"content-type": "application/json",
},
)
charge_response.raise_for_status()
if issuerAPIRoot is not None:
# Submit a charge to the issuer (which is also the PaymentServer).
charge_response = post(
issuerAPIRoot + "v1/stripe/charge",
dumps(charge_json(voucher)),
headers={
"content-type": "application/json",
},
)
charge_response.raise_for_status()
# Tell the client to redeem the voucher.
response = put(
......
File moved
{ pkgs }:
{ pkgs, ... }:
let
sshPrivateKey = ./probeuser_ed25519;
sshPublicKey = ./probeuser_ed25519.pub;
ourpkgs = pkgs.callPackage ../pkgs { };
sshPrivateKeyFile = ./probeuser_ed25519;
sshPublicKeyFile = ./probeuser_ed25519.pub;
sshUsers = {
root = (builtins.readFile sshPublicKey);
probeuser = (builtins.readFile sshPublicKey);
root = [(builtins.readFile sshPublicKeyFile)];
probeuser = [(builtins.readFile sshPublicKeyFile)];
};
# Generate a command which can be used with runOnNode to ssh to the given
# host.
ssh = username: hostname: [
"cp" sshPrivateKey "/tmp/ssh_key" ";"
"chmod" "0400" "/tmp/ssh_key" ";"
"ssh" "-oStrictHostKeyChecking=no" "-i" "/tmp/ssh_key" "${username}@${hostname}" ":"
];
# Separate helper programs so we can write as little perl inside a string
# inside a nix expression as possible.
run-introducer = ./run-introducer.py;
run-client = ./run-client.py;
get-passes = ./get-passes.py;
exercise-storage = ./exercise-storage.py;
# This is a test double of the Stripe API server. It is extremely simple.
# It barely knows how to respond to exactly the API endpoints we use,
......@@ -30,6 +19,7 @@ let
issuerURL = "http://issuer/";
voucher = "xyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxy";
tokenCount = 1000;
# The issuer's signing key. Notionally, this is a secret key. This is only
# the value for this system test though so I don't care if it leaks to the
......@@ -40,6 +30,7 @@ let
basename = "signing-key.private";
in
pkgs.writeText basename key;
ristrettoPublicKey = "xoNHEqAi+kC5EWfqN+kuDINhjQTwGrSQyshHvGFpoys=";
stripeSecretKeyPath =
let
......@@ -49,6 +40,9 @@ let
in
pkgs.writeText basename key;
stripeWebhookSecretKey = "whsec_e302402f2f4b5d8241fe494cd693464345bf28c4d7312516d6c1ce69cd0c1e1d";
stripeWebhookSecretKeyPath = pkgs.writeText "stripe-webhook.secret" stripeWebhookSecretKey;
# Here are the preconstructed secrets which we can assign to the introducer.
# This is a lot easier than having the introducer generate them and then
# discovering and configuring the other nodes with them.
......@@ -71,34 +65,21 @@ let
networking.firewall.enable = false;
networking.dhcpcd.enable = false;
};
# Return a Perl program fragment to run a shell command on one of the nodes.
# The first argument is the name of the node. The second is a list of the
# argv to run.
#
# The program's output is piped to systemd-cat and the Perl fragment
# evaluates to success if the command exits with a success status.
runOnNode = node: argv:
let
command = builtins.concatStringsSep " " argv;
in
"
\$${node}->succeed('set -eo pipefail; ${command} | systemd-cat');
# succeed() is not success but 1 is.
1;
";
in {
name = "private-storage";
# https://nixos.org/nixos/manual/index.html#sec-nixos-tests
# https://nixos.mayflower.consulting/blog/2019/07/11/leveraging-nixos-tests-in-your-project/
nodes = rec {
# Get a machine where we can run a Tahoe-LAFS client node.
client =
{ config, pkgs, ... }:
{ environment.systemPackages = [
{ config, pkgs, ourpkgs, ... }:
{ imports = [ ../modules/packages.nix ];
environment.systemPackages = [
pkgs.daemonize
# A Tahoe-LAFS configuration capable of using the right storage
# plugin.
pkgs.privatestorage
ourpkgs.privatestorage
# Support for the tests we'll run.
(pkgs.python3.withPackages (ps: [ ps.requests ps.hyperlink ]))
];
......@@ -111,7 +92,9 @@ in {
storage =
{ config, pkgs, ... }:
{ imports =
[ ../private-storage.nix
[ ../modules/packages.nix
../modules/private-storage.nix
../modules/ssh.nix
];
services.private-storage = {
enable = true;
......@@ -128,7 +111,9 @@ in {
issuer =
{ config, pkgs, ... }:
{ imports =
[ ../issuer.nix
[ ../modules/packages.nix
../modules/issuer.nix
../modules/ssh.nix
];
services.private-storage.sshUsers = sshUsers;
......@@ -138,10 +123,11 @@ in {
tls = false;
issuer = "Ristretto";
inherit ristrettoSigningKeyPath;
tokensPerVoucher = tokenCount;
letsEncryptAdminEmail = "user@example.invalid";
allowedChargeOrigins = [ "http://unused.invalid" ];
inherit stripeSecretKeyPath;
inherit stripeSecretKeyPath stripeWebhookSecretKeyPath;
stripeEndpointDomain = "api_stripe_com";
stripeEndpointScheme = "HTTP";
stripeEndpointPort = 80;
......@@ -174,138 +160,16 @@ in {
};
};
# Test the machines with a Perl program (sobbing).
testScript =
''
# Boot the VMs. We used to do them all in parallel but the boot
# sequence got flaky at some point for some reason I don't
# understand. :/ It might be related to this:
#
# https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
#
# See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
# that constructs the QEMU command that gets run.
#
# Boot them one at a time for now.
$issuer->connect();
$introducer->connect();
$storage->connect();
$client->connect();
$api_stripe_com->connect();
# The issuer and the storage server should accept SSH connections. This
# doesn't prove it is so but if it fails it's a pretty good indication
# it isn't so.
$storage->waitForOpenPort(22);
${runOnNode "issuer" (ssh "probeuser" "storage")}
${runOnNode "issuer" (ssh "root" "storage")}
$issuer->waitForOpenPort(22);
${runOnNode "storage" (ssh "probeuser" "issuer")}
${runOnNode "storage" (ssh "root" "issuer")}
# Set up a Tahoe-LAFS introducer.
$introducer->copyFileFromHost(
'${pemFile}',
'/tmp/node.pem'
);
eval {
${runOnNode "introducer" [ run-introducer "/tmp/node.pem" (toString introducerPort) introducerFURL ]}
} or do {
my ($code, $log) = $introducer->execute('cat /tmp/stdout /tmp/stderr');
$introducer->log($log);
die $@;
};
#
# Get a Tahoe-LAFS storage server up.
#
my ($code, $version) = $storage->execute('tahoe --version');
$storage->log($version);
# The systemd unit should reach the running state.
$storage->waitForUnit('tahoe.storage.service');
# Some while after that the Tahoe-LAFS node should listen on the web API
# port. The port number here has to agree with the port number set in
# the private-storage.nix module.
$storage->waitForOpenPort(3456);
# Once the web API is listening it should be possible to scrape some
# status from the node if it is really working.
$storage->succeed('tahoe -d /var/db/tahoe-lafs/storage status');
# It should have Eliot logging turned on as well.
$storage->succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]');
#
# Storage appears to be working so try to get a client to speak with it.
#
${runOnNode "client" [ run-client "/tmp/client" introducerFURL issuerURL ]}
$client->waitForOpenPort(3456);
# Make sure the fake Stripe API server is ready for requests.
eval {
$api_stripe_com->waitForUnit("api.stripe.com");
1;
} or do {
my ($code, $log) = $api_stripe_com->execute('journalctl -u api.stripe.com');
$api_stripe_com->log($log);
die $@;
};
# Get some ZKAPs from the issuer.
eval {
${runOnNode "client" [
get-passes
"http://127.0.0.1:3456"
"/tmp/client/private/api_auth_token"
issuerURL
voucher
]}
} or do {
my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
$client->log($log);
# Dump the fake Stripe API server logs, too, since the error may arise
# from a PaymentServer/Stripe interaction.
my ($code, $log) = $api_stripe_com->execute('journalctl -u api.stripe.com');
$api_stripe_com->log($log);
die $@;
};
# The client should be prepped now. Make it try to use some storage.
eval {
${runOnNode "client" [ exercise-storage "/tmp/client" ]}
} or do {
my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
$client->log($log);
die $@;
};
# It should be possible to restart the storage service without the
# storage node fURL changing.
eval {
my $furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl';
my $before = $storage->execute('cat ' . $furlfile);
${runOnNode "storage" [ "systemctl" "restart" "tahoe.storage" ]}
my $after = $storage->execute('cat ' . $furlfile);
if ($before != $after) {
die 'fURL changes after storage node restart';
}
1;
} or do {
my ($code, $log) = $storage->execute('cat /tmp/stdout /tmp/stderr');
$storage->log($log);
die $@;
};
# The client should actually still work, too.
eval {
${runOnNode "client" [ exercise-storage "/tmp/client" ]}
} or do {
my ($code, $log) = $client->execute('cat /tmp/stdout /tmp/stderr');
$client->log($log);
die $@;
};
''; }
# Test the machines with a Python program.
testScript = ourpkgs.lib.testing.makeTestScript {
testpath = ./test_privatestorage.py;
kwargs = {
inherit sshPrivateKeyFile pemFile introducerPort introducerFURL issuerURL ristrettoPublicKey voucher tokenCount stripeWebhookSecretKey;
# Supply some helper programs to help the tests stay a bit higher level.
run_introducer = ./run-introducer.py;
run_client = ./run-client.py;
get_passes = ./get-passes.py;
exercise_storage = ./exercise-storage.py;
};
};
}
......@@ -12,7 +12,7 @@ from subprocess import check_output
from configparser import ConfigParser
def main():
(nodePath, introducerFURL, issuerURL) = argv[1:]
(nodePath, introducerFURL, issuerURL, publicKey, tokenCount) = argv[1:]
run(["tahoe", "--version"])
run([
......@@ -29,10 +29,12 @@ def main():
with open("/tmp/client/tahoe.cfg") as cfg:
config.read_file(cfg)
config.set(u"client", u"storage.plugins", u"privatestorageio-zkapauthz-v1")
config.add_section(u"storageclient.plugins.privatestorageio-zkapauthz-v1")
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v1", u"redeemer", u"ristretto")
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v1", u"ristretto-issuer-root-url", issuerURL)
config.set(u"client", u"storage.plugins", u"privatestorageio-zkapauthz-v2")
config.add_section(u"storageclient.plugins.privatestorageio-zkapauthz-v2")
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"redeemer", u"ristretto")
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"ristretto-issuer-root-url", issuerURL)
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"allowed-public-keys", publicKey)
config.set(u"storageclient.plugins.privatestorageio-zkapauthz-v2", u"default-token-count", tokenCount)
with open("/tmp/client/tahoe.cfg", "wt") as cfg:
config.write(cfg)
......@@ -41,7 +43,7 @@ def main():
"daemonize",
"-o", "/tmp/stdout",
"-e", "/tmp/stderr",
which("tahoe"), "run", "/tmp/client",
which("tahoe"), "run", "--allow-stdin-close", "/tmp/client",
])
def run(argv):
......
......@@ -31,11 +31,11 @@ def main():
"daemonize",
"-o", "/tmp/stdout",
"-e", "/tmp/stderr",
which("tahoe"), "run", "/tmp/introducer",
which("tahoe"), "run", "--allow-stdin-close", "/tmp/introducer",
])
retry(
"waiting for open introducer port",
f"connect to introducer (port {introducerPort})",
lambda: checkOpen(int(introducerPort)),
)
......
# NixOS VM test for the ZKAP spending service: exercises its HTTP API
# from the service host itself and from a second, unconfigured machine,
# and checks the metrics endpoint plus its access policy.
#
# Fixes relative to the scraped original: the Python testScript lost its
# indentation in the compare view (restored here) and "accesible" is
# corrected to "accessible" in a subtest description.
{ pkgs, lib, ... }:
{
  name = "zkap-spending-service";
  nodes = {
    # The machine running the spending service under test.
    spending = { config, pkgs, ourpkgs, modulesPath, ... }: {
      imports = [
        ../modules/packages.nix
        ../modules/spending.nix
      ];
      services.private-storage-spending.enable = true;
      services.private-storage-spending.domain = "localhost";
    };
    external = { ... }: {
      # A node that has no particular configuration, for testing access rules
      # for external hosts.
    };
  };
  testScript = { nodes }: let
    # The revision we installed; /v1/_version must report the same value.
    revision = nodes.spending.config.passthru.ourpkgs.zkap-spending-service.meta.rev;
    # Fail fast (-f) and loudly (-sS) with a short timeout so a hung
    # service fails the test rather than stalling it.
    curl = "${pkgs.curl}/bin/curl -sSf --max-time 5";
  in
  ''
    import json

    start_all()
    spending.wait_for_open_port(80)

    with subtest("Ensure we can ping the spending service"):
        output = spending.succeed("${curl} http://localhost/v1/_ping")
        assert json.loads(output)["status"] == "ok", "Could not ping spending service."

    with subtest("Ensure external hosts can ping the spending service"):
        output = external.succeed("${curl} http://spending/v1/_ping")
        assert json.loads(output)["status"] == "ok", "Could not ping spending service."

    with subtest("Ensure that the spending service version matches the expected version"):
        output = spending.succeed("${curl} http://localhost/v1/_version")
        assert json.loads(output)["revision"] == "${revision}", "Spending service revision does not match."

    with subtest("Ensure that the spending service generates metrics"):
        # TODO: We should pass "-H 'accept: application/openmetrics-text'" here.
        # See https://github.com/prometheus/prometheus/issues/8932
        output = spending.succeed("${curl} http://localhost/metrics | ${pkgs.prometheus}/bin/promtool check metrics")

    with subtest("Ensure that the metrics are not accessible from other machines"):
        output = external.fail("${curl} http://spending/metrics")
  '';
}