def runOnNode(node, argv):
"""
    Run a shell command on one of the nodes. The first argument is the node
    on which to run the command. The second is the argv list to run.
The program's output is piped to systemd-cat and the python fragment
evaluates to success if the command exits with a success status.
"""
try:
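        # `set -eo pipefail` makes the pipeline fail if the command itself
        # fails, not only if systemd-cat does.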
node.succeed('set -eo pipefail; {} | systemd-cat'.format(" ".join(argv)))
    except Exception:
        code, output = node.execute('cat /tmp/stdout /tmp/stderr')
        node.log(output)
raise
"""
Generate a command which can be used with runOnNode to ssh to the given
host.
"""
return [
"chmod", "0400", "/tmp/ssh_key", ";",
"ssh", "-oStrictHostKeyChecking=no", "-i", "/tmp/ssh_key",
"{username}@{hostname}".format(username=username, hostname=hostname), ":",
]
def test(
    sshPrivateKeyFile,
    pemFile,
    run_introducer,
    run_client,
    get_passes,
    exercise_storage,
    introducerPort,
    introducerFURL,
    issuerURL,
    ristrettoPublicKey,
    tokenCount,
    voucher,
):
    """
    Exercise the whole system: boot the VMs, bring up a Tahoe-LAFS introducer
    and storage node, redeem a voucher for ZKAPs from the issuer, and use the
    resulting client to exercise the storage service.
    """
# Boot the VMs. We used to do them all in parallel but the boot
# sequence got flaky at some point for some reason I don't
# understand. :/ It might be related to this:
#
# https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
#
# See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
# that constructs the QEMU command that gets run.
#
# Boot them one at a time for now.
issuer.connect()
introducer.connect()
storage.connect()
client.connect()
api_stripe_com.connect()
    # The issuer and the storage server should accept SSH connections. This
    # doesn't prove that they are configured correctly, but a failure here is
    # a good indication that they are not.
storage.wait_for_open_port(22)
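    # Probe SSH access both as an unprivileged user and as root so both
    # kinds of account are exercised.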
runOnNode(issuer, ssh("probeuser", sshPrivateKeyFile, "storage"))
runOnNode(issuer, ssh("root", sshPrivateKeyFile, "storage"))
issuer.wait_for_open_port(22)
runOnNode(storage, ssh("probeuser", sshPrivateKeyFile, "issuer"))
runOnNode(storage, ssh("root", sshPrivateKeyFile, "issuer"))
# Set up a Tahoe-LAFS introducer.
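    # Stage the node certificate where run_introducer expects to find it.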
introducer.copy_from_host(pemFile, '/tmp/node.pem')
runOnNode(introducer, [run_introducer, "/tmp/node.pem", str(introducerPort), introducerFURL])
#
# Get a Tahoe-LAFS storage server up.
#
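    # Record the Tahoe-LAFS version in the test log so failures are easier
    # to interpret.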
code, version = storage.execute('tahoe --version')
storage.log(version)
# The systemd unit should reach the running state.
storage.wait_for_unit('tahoe.storage.service')
# Some while after that the Tahoe-LAFS node should listen on the web API
# port. The port number here has to agree with the port number set in
# the private-storage.nix module.
storage.wait_for_open_port(3456)
# Once the web API is listening it should be possible to scrape some
# status from the node if it is really working.
storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')
# It should have Eliot logging turned on as well.
storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')
# Make sure the issuer is ready to accept connections.
issuer.wait_for_open_port(80)
#
# Storage appears to be working so try to get a client to speak with it.
#
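    # run_client creates a Tahoe-LAFS client node under /tmp/client pointed
    # at the introducer and the ZKAP issuer, then starts it.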
runOnNode(client, [run_client, "/tmp/client", introducerFURL, issuerURL, ristrettoPublicKey, str(tokenCount)])
client.wait_for_open_port(3456)
# Make sure the fake Stripe API server is ready for requests.
try:
api_stripe_com.wait_for_open_port(80)
    except Exception:
code, output = api_stripe_com.execute('journalctl -u api.stripe.com')
api_stripe_com.log(output)
raise
# Get some ZKAPs from the issuer.
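    # get_passes redeems the voucher through the client's web API,
    # authenticating with the node's api_auth_token.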
try:
runOnNode(client, [
get_passes,
"http://127.0.0.1:3456",
"/tmp/client/private/api_auth_token",
issuerURL,
voucher,
])
    except Exception:
# Dump the fake Stripe API server logs, too, since the error may arise
# from a PaymentServer/Stripe interaction.
for node, unit in [(api_stripe_com, "api.stripe.com"), (issuer, "zkapissuer")]:
code, output = node.execute(f'journalctl -u {unit}')
node.log(output)
raise
# The client should be prepped now. Make it try to use some storage.
runOnNode(client, [exercise_storage, "/tmp/client"])
# It should be possible to restart the storage service without the
# storage node fURL changing.
furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl'
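    # execute() returns an (exit status, output) pair, so comparing the pairs
    # below also compares the fURL file contents.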
before = storage.execute('cat ' + furlfile)
runOnNode(storage, ["systemctl", "restart", "tahoe.storage"])
after = storage.execute('cat ' + furlfile)
    if before != after:
raise Exception('fURL changes after storage node restart')
# The client should actually still work, too.
runOnNode(client, [exercise_storage, "/tmp/client"])
# The issuer metrics should be accessible from the monitoring network.
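    # A local alias interface stands in for the monitoring (VPN) network.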
issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')
# The issuer metrics should NOT be accessible from any other network.
issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
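    # Tear the pretend monitoring interface back down.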
issuer.execute('ifconfig lo:fauxvpn down')