Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • privatestorage/PrivateStorageio
  • tomprince/PrivateStorageio
2 results
Show changes
Showing with 739 additions and 41 deletions
# NixOS VM test: boot a node running a Tahoe-LAFS storage server and make
# sure our tahoe.nix module produces a working configuration.
{ pkgs, ... }:
let
  # Our local package set (privatestorage, testing helpers, etc.).
  ourpkgs = pkgs.callPackage ../pkgs { };
in
{
  name = "tahoe";
  nodes = {
    # A single node acting as a Tahoe-LAFS storage server.
    storage = { config, pkgs, ourpkgs, ... }: {
      imports = [
        ../modules/packages.nix
        ../modules/tahoe.nix
      ];
      services.tahoe.nodes.storage = {
        package = ourpkgs.privatestorage;
        # Sections written into the node's tahoe.cfg.
        sections = {
          node = {
            nickname = "storage";
            # Web API bound to localhost only.
            "web.port" = "tcp:4000:interface=127.0.0.1";
            # NOTE(review): a fixed tub.port/tub.location presumably keeps
            # the node's fURL stable across restarts -- confirm.
            "tub.port" = "tcp:4001";
            "tub.location" = "tcp:127.0.0.1:4001";
          };
          storage = {
            enabled = true;
          };
        };
      };
    };
  };
  # The python test driver script run against the booted node(s).
  testScript = ourpkgs.lib.testing.makeTestScript {
    testpath = ./test_tahoe.py;
  };
}
import hmac
from shlex import quote
from time import time
def runOnNode(node, argvs):
    """
    Run shell commands on one of the test nodes.

    :param node: A test-driver node object; must provide ``succeed``,
        ``execute`` and ``log``.
    :param argvs: A list of argv lists, each run as one shell command.

    Each command's output is piped to systemd-cat and the command must exit
    with a success status; on failure diagnostics are dumped to the test log
    before the error is re-raised.
    """
    for argv in argvs:
        try:
            node.succeed('set -eo pipefail; {} | systemd-cat'.format(" ".join(map(quote, argv))))
        except Exception:
            # The exception object itself was never used (previously bound as
            # ``e``); we only want a chance to dump diagnostics before
            # propagating the failure.
            # NOTE(review): nothing in the command above writes /tmp/stdout
            # or /tmp/stderr -- presumably some invoked programs create
            # them; confirm.
            code, output = node.execute('cat /tmp/stdout /tmp/stderr')
            node.log(output)
            raise
def ssh(username, sshPrivateKeyFile, hostname):
    """
    Build the command sequence, suitable for ``runOnNode``, that probes an
    SSH connection to the given host as the given user (running only ``:``).
    """
    key_path = "/tmp/ssh_key"
    destination = "{username}@{hostname}".format(username=username, hostname=hostname)
    return [
        # Copy the key into place with restrictive permissions so ssh will
        # accept it.
        ["cp", sshPrivateKeyFile, key_path],
        ["chmod", "0400", key_path],
        ["ssh", "-oStrictHostKeyChecking=no", "-i", key_path, destination, ":"],
    ]
def checkout_session_completed(voucher: str) -> str:
    """
    Build the JSON request body for a Stripe ``checkout.session.completed``
    webhook event whose ``client_reference_id`` is the given voucher.
    """
    # The fixture is a captured Stripe event; only the voucher is
    # interpolated (as the session's client_reference_id).
    return """\
{
"id": "evt_1LxcsdBHXBAMm9bPSq6UWAZe",
"object": "event",
"api_version": "2019-11-05",
"created": 1666903247,
"data": {
"object": {
"id": "cs_test_a1kWLWGoXZPa6ywyVnuib8DPA3BqXCWZX5UEjLfKh7gLjdZy2LD3F5mEp3",
"object": "checkout.session",
"after_expiration": null,
"allow_promotion_codes": null,
"amount_subtotal": 3000,
"amount_total": 3000,
"automatic_tax": {
"enabled": false,
"status": null
},
"billing_address_collection": null,
"cancel_url": "https://httpbin.org/post",
"client_reference_id": "%(voucher)s",
"consent": null,
"consent_collection": null,
"created": 1666903243,
"currency": "usd",
"customer": "cus_Mh0u62xtelUehD",
"customer_creation": "always",
"customer_details": {
"address": {
"city": null,
"country": null,
"line1": null,
"line2": null,
"postal_code": null,
"state": null
},
"email": "stripe@example.com",
"name": null,
"phone": null,
"tax_exempt": "none",
"tax_ids": [
]
},
"customer_email": null,
"display_items": [
{
"amount": 1500,
"currency": "usd",
"custom": {
"description": "comfortable cotton t-shirt",
"images": null,
"name": "t-shirt"
},
"quantity": 2,
"type": "custom"
}
],
"expires_at": 1666989643,
"livemode": false,
"locale": null,
"metadata": {
},
"mode": "payment",
"payment_intent": "pi_3LxcsZBHXBAMm9bP1daBGoPV",
"payment_link": null,
"payment_method_collection": "always",
"payment_method_options": {
},
"payment_method_types": [
"card"
],
"payment_status": "paid",
"phone_number_collection": {
"enabled": false
},
"recovered_from": null,
"setup_intent": null,
"shipping": null,
"shipping_address_collection": null,
"shipping_options": [
],
"shipping_rate": null,
"status": "complete",
"submit_type": null,
"subscription": null,
"success_url": "https://httpbin.org/post",
"total_details": {
"amount_discount": 0,
"amount_shipping": 0,
"amount_tax": 0
},
"url": null
}
},
"livemode": false,
"pending_webhooks": 2,
"request": {
"id": null,
"idempotency_key": null
},
"type": "checkout.session.completed"
}
""" % {"voucher": voucher}
def stripe_signature(key: str, body: str) -> str:
    """
    Compute a valid ``Stripe-Signature`` header value for the given webhook
    body: ``t=<timestamp>,v1=<hmac-sha256 of "<timestamp>.<body>">``.
    """
    now = int(time())
    signed_payload = "{}.{}".format(now, body).encode("utf-8")
    digest = hmac.new(key.encode("utf-8"), signed_payload, "sha256").hexdigest()
    return "t={},v1={}".format(now, digest)
def pay_for_voucher(url: str, webhook_secret, voucher: str) -> list[str]:
    """
    Build the curl argv that tells the issuer's Stripe webhook endpoint that
    payment for the given voucher was received.
    """
    payload = checkout_session_completed(voucher)
    signature = stripe_signature(webhook_secret, payload)
    return [
        "curl",
        "-X", "POST",
        "--header", "content-type: application/json; charset=utf-8",
        "--header", f"stripe-signature: {signature}",
        "--data-binary", payload,
        url + "v1/stripe/webhook",
    ]
def test(
    sshPrivateKeyFile,
    pemFile,
    run_introducer,
    run_client,
    get_passes,
    exercise_storage,
    introducerPort,
    introducerFURL,
    issuerURL,
    ristrettoPublicKey,
    stripeWebhookSecretKey,
    voucher,
    tokenCount,
):
    """
    End-to-end grid test: boot the VMs, stand up the introducer, storage
    server and issuer, pay for a voucher through the fake Stripe webhook,
    redeem passes from a client, exercise storage (including across a
    storage-node restart), and check that issuer metrics are reachable only
    from the monitoring network.

    NOTE(review): the node objects used below (``issuer``, ``introducer``,
    ``storage``, ``client``, ``api_stripe_com``) are not parameters --
    presumably they are globals injected by the NixOS test driver; confirm.
    """
    # Boot the VMs. We used to do them all in parallel but the boot
    # sequence got flaky at some point for some reason I don't
    # understand. :/ It might be related to this:
    #
    # https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
    #
    # See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
    # that constructs the QEMU command that gets run.
    #
    # Boot them one at a time for now.
    issuer.connect()
    introducer.connect()
    storage.connect()
    client.connect()
    api_stripe_com.connect()
    # The issuer and the storage server should accept SSH connections. This
    # doesn't prove it is so but if it fails it's a pretty good indication
    # it isn't so.
    storage.wait_for_open_port(22)
    runOnNode(issuer, ssh("probeuser", sshPrivateKeyFile, "storage"))
    runOnNode(issuer, ssh("root", sshPrivateKeyFile, "storage"))
    issuer.wait_for_open_port(22)
    runOnNode(storage, ssh("probeuser", sshPrivateKeyFile, "issuer"))
    runOnNode(storage, ssh("root", sshPrivateKeyFile, "issuer"))
    # Set up a Tahoe-LAFS introducer.
    introducer.copy_from_host(pemFile, '/tmp/node.pem')
    runOnNode(introducer, [[run_introducer, "/tmp/node.pem", str(introducerPort), introducerFURL]])
    #
    # Get a Tahoe-LAFS storage server up.
    #
    code, version = storage.execute('tahoe --version')
    storage.log(version)
    # The systemd unit should reach the running state.
    storage.wait_for_unit('tahoe.storage.service')
    # Some while after that the Tahoe-LAFS node should listen on the web API
    # port. The port number here has to agree with the port number set in
    # the private-storage.nix module.
    storage.wait_for_open_port(3456)
    # Once the web API is listening it should be possible to scrape some
    # status from the node if it is really working.
    storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
    # It should have Eliot logging turned on as well.
    storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
    # Make sure the issuer is ready to accept connections.
    issuer.wait_for_open_port(80)
    # Pretend to be Stripe and report that our voucher has been paid for.
    runOnNode(issuer, [pay_for_voucher("http://localhost/", stripeWebhookSecretKey, voucher)])
    #
    # Storage appears to be working so try to get a client to speak with it.
    #
    runOnNode(client, [[run_client, "/tmp/client", introducerFURL, issuerURL, ristrettoPublicKey, str(tokenCount)]])
    client.wait_for_open_port(3456)
    # Make sure the fake Stripe API server is ready for requests.
    try:
        api_stripe_com.wait_for_open_port(80)
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
    # since it re-raises, only the extra log dump is affected.
    except:
        code, output = api_stripe_com.execute('journalctl -u api.stripe.com')
        api_stripe_com.log(output)
        raise
    # Get some ZKAPs from the issuer.
    try:
        runOnNode(client, [[
            get_passes,
            "http://127.0.0.1:3456",
            "/tmp/client/private/api_auth_token",
            voucher,
        ]])
    except:
        # Dump the fake Stripe API server logs, too, since the error may arise
        # from a PaymentServer/Stripe interaction.
        for node, unit in [(api_stripe_com, "api.stripe.com"), (issuer, "zkapissuer")]:
            code, output = node.execute(f'journalctl -u {unit}')
            node.log(output)
        raise
    # The client should be prepped now. Make it try to use some storage.
    runOnNode(client, [[exercise_storage, "/tmp/client"]])
    # It should be possible to restart the storage service without the
    # storage node fURL changing.
    furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v2.furl'
    before = storage.execute('cat ' + furlfile)
    runOnNode(storage, [["systemctl", "restart", "tahoe.storage"]])
    after = storage.execute('cat ' + furlfile)
    if (before != after):
        raise Exception('fURL changes after storage node restart')
    # The client should actually still work, too.
    runOnNode(client, [[exercise_storage, "/tmp/client"]])
    # The issuer metrics should be accessible from the monitoring network.
    issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
    issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
    issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')
    # The issuer metrics should NOT be accessible from any other network.
    issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
    client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
    issuer.execute('ifconfig lo:fauxvpn down')
def test():
    """
    Verify that the tahoe.storage service re-creates its state directory
    when the ``.created`` marker is removed, keeps numbered backups of the
    prior state, and cleans up partial ``.atomic`` state left behind by an
    interrupted re-creation.

    NOTE(review): ``storage`` and ``start_all`` are presumably globals
    injected by the NixOS test driver; confirm.
    """
    start_all()
    # After the service starts, destroy the "created" marker to force it to
    # re-create its internal state.
    storage.wait_for_open_port(4001)
    storage.succeed("systemctl stop tahoe.storage")
    storage.succeed("rm /var/db/tahoe-lafs/storage.created")
    storage.succeed("systemctl start tahoe.storage")
    # After it starts up again, verify it has consistent internal state and a
    # backup of the prior state.
    storage.wait_for_open_port(4001)
    storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.privkey ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1/private/node.pem ]")
    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.2 ]")
    # Stop it again, once again destroy the "created" marker, and this time also
    # jam some partial state in the way that will need cleanup.
    storage.succeed("systemctl stop tahoe.storage")
    storage.succeed("rm /var/db/tahoe-lafs/storage.created")
    storage.succeed("mkdir -p /var/db/tahoe-lafs/storage.atomic/partial")
    try:
        storage.succeed("systemctl start tahoe.storage")
    # NOTE(review): bare except re-raises, so only the diagnostic log dump
    # is affected by the broad catch.
    except:
        x, y = storage.execute("journalctl -u tahoe.storage")
        storage.log(y)
        raise
    # After it starts up again, verify it has consistent internal state and
    # backups of the prior two states. It also has no copy of the inconsistent
    # state because it could never have been used.
    storage.wait_for_open_port(4001)
    storage.succeed("[ -e /var/db/tahoe-lafs/storage ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.created ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.1 ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2 ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.privkey ]")
    storage.succeed("[ -e /var/db/tahoe-lafs/storage.2/private/node.pem ]")
    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.atomic ]")
    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage/partial ]")
    storage.succeed("[ ! -e /var/db/tahoe-lafs/storage.3 ]")
# The overall unit test suite for PrivateStorageio NixOS configuration.
{ pkgs }:
let
pkgs = import <nixpkgs> { };
# Total the numbers in a list.
sum = builtins.foldl' (a: b: a + b) 0;
......
{ "name": "release2105"
, "url": "https://releases.nixos.org/nixos/21.05/nixos-21.05.804.5de44c15758/nixexprs.tar.xz"
, "sha256": "002zvc16hyrbs0icx1qj255c9dqjpdxx4bhhfjndlj3kwn40by0m"
}
import (builtins.fetchTarball (builtins.fromJSON (builtins.readFile ./nixpkgs-2105.json)))
{ "name": "nixpkgs"
, "url": "https://github.com/PrivateStorageio/nixpkgs/archive/5ebd5af2d5c6caf23735c8c0e6bc27357fa8d2a8.tar.gz"
, "sha256": "1g2bvs8prqjskzv8s1qmh36k7rmj98jib0syqbrq02xxzw5dpqb4"
}
{
"name": "source",
"url": "https://releases.nixos.org/nixos/24.11/nixos-24.11.716716.7819a0d29d1d/nixexprs.tar.xz",
"sha256": "1ybrgaw9mhrrss09l2nscjl959f0ivlsa76a08qwzvpkhc200mn2"
}
\ No newline at end of file
import (builtins.fetchTarball (builtins.fromJSON (builtins.readFile ./nixpkgs-ps.json)))
import (builtins.fetchTarball (builtins.fromJSON (builtins.readFile ./nixpkgs.json)))
# Build the PrivateStorageio documentation with Sphinx.
{ stdenv, lib, graphviz, python3Packages }:
stdenv.mkDerivation rec {
  version = "0.0";
  name = "privatestorageio-${version}";
  src = lib.cleanSource ./.;
  # graphviz is needed at documentation build time -- presumably for
  # diagram generation via a sphinx extension; confirm.
  depsBuildBuild = [
    graphviz
  ];
  # -W turns sphinx warnings into build failures.
  buildPhase = ''
    ${python3Packages.sphinx}/bin/sphinx-build -W docs/source docs/build
  '';
  installPhase = ''
    mkdir $out
    mv docs/build $out/docs
  '';
}
let
release2105 = import ./nixpkgs-2105.nix { };
pinned-pkgs = import ./nixpkgs.nix { };
in
{ pkgs ? pinned-pkgs, lib ? pkgs.lib, python ? pkgs.python3 }:
let
tools = pkgs.callPackage ./tools {};
in
{ pkgs ? release2105 }:
pkgs.mkShell {
NIX_PATH = "nixpkgs=${pkgs.path}";
# When a path (such as `pkgs.path`) is interpolated into a string then nix
# first adds that path to the store, and then interpolates the store path
# into the string. We use `builtins.toString` to convert the path to a
# string without copying it to the store before interpolating. Either the
# path is already in the store (e.g. when `pkgs` is `pinned-pkgs`) so we
# avoid making a second copy with a longer name, or the user passed in local
# path (e.g. a checkout of nixpkgs) and we point at it directly, rather than
# a snapshot of it.
# See https://github.com/NixOS/nix/issues/200 and https://github.com/NixOS/nix/issues/1728
shellHook = ''
export NIX_PATH="nixpkgs=${builtins.toString pkgs.path}";
'';
# Run the shellHook from tools
inputsFrom = [tools];
buildInputs = [
tools
pkgs.cacert
pkgs.nix
pkgs.morph
pkgs.vagrant
pkgs.jp
];
}
# Package the in-tree maintenance scripts with their python dependencies
# available, as wrapped executables under bin/.
{ pkgs, lib, makeWrapper, ... }:
let
python = pkgs.python3;
# This is a python environment that has the dependencies
# for the development python scripts we use, and the
# helper library.
python-env = python.buildEnv.override {
extraLibs = [ python.pkgs.httpx ];
# Add `.pth` file pointing at the directory containing our helper library.
# This will get added to `sys.path` by `site.py`.
# See https://docs.python.org/3/library/site.html
postBuild = ''
echo ${lib.escapeShellArg ./pylib} > $out/${lib.escapeShellArg python.sitePackages}/tools.pth
'';
};
# The in-tree scripts to wrap.
python-commands = [
./update-nixpkgs
./update-gitlab-repo
./update-github-repo
];
in
# This derivation creates a package that wraps our tools to setup an environment
# with their dependencies available.
pkgs.runCommand "ps_tools" {
nativeBuildInputs = [ makeWrapper ];
shellHook = ''
# Only display the help if we are running an interactive shell.
if [[ $- == *i* ]]; then
cat <<MOTD
Tools (pass --help for details):
${lib.concatStringsSep "\n" (map (path:
"- ${baseNameOf path}"
) python-commands)}
MOTD
fi
'';
} ''
mkdir -p $out/bin
${lib.concatStringsSep "\n" (map (path:
let
baseName = baseNameOf path;
# We use toString so that we wrap the in-tree scripts, rather than copying
# them to the nix-store. This means that we don't need to run nix-shell again
# to pick up changes.
sourcePath = toString path;
in
# makeWrapper <executable> <wrapperfile> <args>
# See https://nixos.org/manual/nixpkgs/stable/#fun-makeWrapper
"makeWrapper ${python-env}/bin/python $out/bin/${baseName} --add-flags ${sourcePath}"
) python-commands)}
''
......@@ -9,11 +9,9 @@
{ pathToGrid }:
let
grid = import pathToGrid;
vpnConfig = node: node.services.private-storage.monitoring.vpn or null;
vpnClientIP = node: (vpnConfig node).client.ip or null;
vpnServerIP = node: (vpnConfig node).server.ip or null;
in
vpnIP = node: node.config.grid.monitoringvpnIPv4 or null; # "or null" since "network" in grid doesn't have a monitoringIPv4
in rec
{
"serverIP" = vpnServerIP grid.monitoring;
"clientIPs" = builtins.filter (x: x != null) (map vpnClientIP (builtins.attrValues grid));
serverIP = vpnIP grid.monitoring;
clientIPs = builtins.filter (x: x != serverIP && x != null) (map vpnIP (builtins.attrValues grid));
}
This directory contains a Python package of helper functions used by the scripts in ``tools/``.
To get it onto the Python path, run ``nix-shell`` in the root of the repository.
"""
Helpers for development and CI scripts.
"""
from __future__ import annotations
import subprocess
def get_url_hash(hash_type, name, url) -> dict[str, str]:
    """
    Compute the nix hash of the resource at the given URL.

    :returns: Dictionary of arguments suitable to pass to :nix:`pkgs.fetchzip`
        or a function derived from it (such as :nix:`pkgs.fetchFromGitLab`)
        to specify the hash.
    """
    argv = [
        "nix-prefetch-url",
        "--type", hash_type,
        "--unpack",
        "--name", name,
        url,
    ]
    # nix-prefetch-url prints the resulting hash on stdout; a non-zero exit
    # raises CalledProcessError via check=True.
    completed = subprocess.run(
        argv,
        capture_output=True,
        check=True,
        encoding="utf-8",
    )
    return {
        "outputHashAlgo": hash_type,
        "outputHash": completed.stdout.strip(),
    }
#!/usr/bin/env python
"""
Update a pinned github repository.
Pass this path to a JSON file and it will update it to the latest
version of the branch it specifies. You can also pass a different
branch or repository owner, which will update the file to point at
the new branch/repository, and update to the latest version.
"""
import argparse
import json
from pathlib import Path
import httpx
from ps_tools import get_url_hash
HASH_TYPE = "sha512"
ARCHIVE_TEMPLATE = "https://api.github.com/repos/{owner}/{repo}/tarball/{rev}"
BRANCH_TEMPLATE = (
"https://api.github.com/repos/{owner}/{repo}/commits/{branch}"
)
def get_github_commit(config):
    """Ask the GitHub API for the sha of the tip of the configured branch."""
    branch_url = BRANCH_TEMPLATE.format(**config)
    response = httpx.get(branch_url)
    response.raise_for_status()
    commit_info = response.json()
    return commit_info["sha"]
def get_github_archive_url(config):
    """Return the GitHub tarball URL for the pinned owner/repo/revision."""
    tarball_url = ARCHIVE_TEMPLATE.format(**config)
    return tarball_url
def main():
    """
    Update a pinned-GitHub-repository JSON file: apply any owner/branch/rev
    overrides from the command line, resolve the revision to pin, refresh
    the archive hash, and write (or print, with --dry-run) the result.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "repo_file",
        metavar="repo-file",
        type=Path,
        help="JSON file with pinned configuration.",
    )
    parser.add_argument("--branch", type=str, help="Branch to update to.")
    parser.add_argument("--owner", type=str, help="Repository owner to update to.")
    parser.add_argument("--rev", type=str, help="Revision to pin.")
    parser.add_argument("--dry-run", action="store_true")
    args = parser.parse_args()

    config = json.loads(args.repo_file.read_text())
    # Fold in any overrides given on the command line.
    overrides = {
        key: getattr(args, key)
        for key in ("owner", "branch")
        if getattr(args, key) is not None
    }
    config.update(overrides)
    # Pin either the explicitly requested revision or the branch tip.
    config["rev"] = args.rev if args.rev is not None else get_github_commit(config)
    config.update(get_url_hash(HASH_TYPE, "source", get_github_archive_url(config)))
    serialized = json.dumps(config, indent=2)
    if args.dry_run:
        print(serialized)
    else:
        args.repo_file.write_text(serialized)
#!/usr/bin/env python
"""
Update a pinned gitlab repository.
Pass this path to a JSON file and it will update it to the latest
version of the branch it specifies. You can also pass a different
branch or repository owner, which will update the file to point at
the new branch/repository, and update to the latest version.
"""
import argparse
import json
from pathlib import Path
import httpx
from ps_tools import get_url_hash
HASH_TYPE = "sha512"
ARCHIVE_TEMPLATE = "https://{domain}/api/v4/projects/{owner}%2F{repo}/repository/archive.tar.gz?sha={rev}"
BRANCH_TEMPLATE = (
"https://{domain}/api/v4/projects/{owner}%2F{repo}/repository/branches/{branch}"
)
def get_gitlab_commit(config):
    """Ask the GitLab API for the id of the tip of the configured branch."""
    branch_url = BRANCH_TEMPLATE.format(**config)
    response = httpx.get(branch_url)
    response.raise_for_status()
    branch_info = response.json()
    return branch_info["commit"]["id"]
def get_gitlab_archive_url(config):
    """Return the GitLab archive URL for the pinned owner/repo/revision."""
    archive_url = ARCHIVE_TEMPLATE.format(**config)
    return archive_url
def main():
    """
    Update a pinned-GitLab-repository JSON file: apply any owner/branch/rev
    overrides from the command line, resolve the revision to pin, refresh
    the archive hash, and write (or print, with --dry-run) the result.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "repo_file",
        metavar="repo-file",
        type=Path,
        help="JSON file with pinned configuration.",
    )
    parser.add_argument("--branch", type=str, help="Branch to update to.")
    parser.add_argument("--owner", type=str, help="Repository owner to update to.")
    parser.add_argument("--rev", type=str, help="Revision to pin.")
    parser.add_argument("--dry-run", action="store_true")
    args = parser.parse_args()

    config = json.loads(args.repo_file.read_text())
    # Fold in any overrides given on the command line.
    overrides = {
        key: getattr(args, key)
        for key in ("owner", "branch")
        if getattr(args, key) is not None
    }
    config.update(overrides)
    # Pin either the explicitly requested revision or the branch tip.
    config["rev"] = args.rev if args.rev is not None else get_gitlab_commit(config)
    config.update(get_url_hash(HASH_TYPE, "source", get_gitlab_archive_url(config)))
    serialized = json.dumps(config, indent=2)
    if args.dry_run:
        print(serialized)
    else:
        args.repo_file.write_text(serialized)
#!/usr/bin/env python
import argparse
import json
from pathlib import Path
import httpx
from ps_tools import get_url_hash
# We pass this to builtins.fetchTarball which only supports sha256
HASH_TYPE = "sha256"
DEFAULT_CHANNEL = "nixos-24.11"
CHANNEL_URL_TEMPLATE = "https://channels.nixos.org/{channel}/nixexprs.tar.xz"
def get_nixos_channel_url(*, channel):
    """
    Get the URL for the current release of the given nixos channel.

    `https://channels.nixos.org/<channel>` redirects to the path on
    `https://releases.nixos.org` that corresponds to the current release
    of that channel. This captures that redirect, so we can pin against
    the release.

    :raises RuntimeError: if the channel URL does not redirect, which
        would leave nothing to pin against.
    """
    response = httpx.head(
        CHANNEL_URL_TEMPLATE.format(channel=channel), follow_redirects=False
    )
    # An ``assert`` here would be silently stripped under ``python -O``;
    # check and raise explicitly so a non-redirect always fails loudly.
    if not response.is_redirect:
        raise RuntimeError(
            f"expected a redirect from the channel URL, got HTTP {response.status_code}"
        )
    return str(response.next_request.url)
def main():
    """
    Update the pinned nixpkgs JSON file to the current release of a NixOS
    channel, recording the release URL and its sha256.
    """
    parser = argparse.ArgumentParser(description="Update a pinned nixos repository.")
    parser.add_argument(
        "repo_file",
        metavar="repo-file",
        nargs="?",
        default=Path(__file__).parent.with_name("nixpkgs.json"),
        type=Path,
        help="JSON file with pinned configuration.",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
    )
    # Previously the channel was fixed via ``parser.set_defaults`` with no
    # way to override it from the command line; expose it as an option.
    # The default is unchanged, so existing invocations behave the same.
    parser.add_argument(
        "--channel",
        type=str,
        default=DEFAULT_CHANNEL,
        help="NixOS channel to pin against.",
    )
    args = parser.parse_args()
    repo_file = args.repo_file
    print(f"reading {repo_file}")
    config = json.loads(repo_file.read_text())
    print(f"read {config!r}")
    config["url"] = get_nixos_channel_url(channel=args.channel)
    # builtins.fetchTarball only supports sha256, so record just that hash.
    hash_data = get_url_hash(HASH_TYPE, name=config["name"], url=config["url"])
    config["sha256"] = hash_data["outputHash"]
    output = json.dumps(config, indent=2)
    if args.dry_run:
        print(output)
    else:
        print(f"writing to {repo_file}")
        repo_file.write_text(output)