Newer
Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
def runOnNode(node, argv):
    """
    Run a shell command on one of the nodes. The first argument is the name
    of the node. The second is a list of the argv to run.
    The program's output is piped to systemd-cat and the python fragment
    evaluates to success if the command exits with a success status.
    """
    try:
        # pipefail makes the pipeline fail when the command itself fails,
        # not just when systemd-cat does; output lands in the journal.
        node.succeed('set -eo pipefail; {} | systemd-cat'.format(" ".join(argv)))
    except Exception:
        # Best-effort diagnostics: dump whatever the command left in the
        # conventional capture files, then re-raise the original failure.
        #
        # Fixed: log via the node the command actually ran on — the original
        # code logged through the unrelated global `introducer`, which hid
        # output from failures on any other node.
        code, output = node.execute('cat /tmp/stdout /tmp/stderr')
        node.log(output)
        raise
def ssh(username, sshPrivateKey, hostname):
    """
    Generate a command which can be used with runOnNode to ssh to the given
    host.

    The command copies the key into place with restricted permissions before
    connecting, and runs only ":" on the remote side (a connectivity probe).
    """
    target = "{username}@{hostname}".format(username=username, hostname=hostname)
    install_key = ["cp", sshPrivateKey, "/tmp/ssh_key", ";"]
    restrict_key = ["chmod", "0400", "/tmp/ssh_key", ";"]
    connect = [
        "ssh", "-oStrictHostKeyChecking=no", "-i", "/tmp/ssh_key", target, ":",
    ]
    return install_key + restrict_key + connect
def test(
    sshPrivateKey,
    pemFile,
    run_introducer,
    run_client,
    get_passes,
    exercise_storage,
    introducerPort,
    introducerFURL,
    issuerURL,
    ristrettoPublicKey,
    voucher,
):
    """
    Exercise the full deployment: boot all VMs, verify SSH reachability,
    start a Tahoe-LAFS introducer, storage server, and client, redeem a
    voucher for ZKAPs against the issuer, use the storage, and check the
    issuer's metrics endpoint is reachable only from the monitoring network.

    Raises on any failed step; relies on the NixOS test driver's node
    globals (issuer, introducer, storage, client, api_stripe_com).
    """
    # Boot the VMs. We used to do them all in parallel but the boot
    # sequence got flaky at some point for some reason I don't
    # understand. :/ It might be related to this:
    #
    # https://discourse.nixos.org/t/nixos-ppc64le-vm-does-not-have-dev-vda-device/11548/9
    #
    # See <nixpkgs/nixos/modules/virtualisation/qemu-vm.nix> for the Nix
    # that constructs the QEMU command that gets run.
    #
    # Boot them one at a time for now.
    issuer.connect()
    introducer.connect()
    storage.connect()
    client.connect()
    api_stripe_com.connect()

    # The issuer and the storage server should accept SSH connections. This
    # doesn't prove it is so but if it fails it's a pretty good indication
    # it isn't so.
    storage.wait_for_open_port(22)
    runOnNode(issuer, ssh("probeuser", sshPrivateKey, "storage"))
    runOnNode(issuer, ssh("root", sshPrivateKey, "storage"))
    issuer.wait_for_open_port(22)
    runOnNode(storage, ssh("probeuser", sshPrivateKey, "issuer"))
    runOnNode(storage, ssh("root", sshPrivateKey, "issuer"))

    # Set up a Tahoe-LAFS introducer.
    introducer.copy_from_host(pemFile, '/tmp/node.pem')
    runOnNode(introducer, [run_introducer, "/tmp/node.pem", str(introducerPort), introducerFURL])

    #
    # Get a Tahoe-LAFS storage server up.
    #
    _, version = storage.execute('tahoe --version')
    storage.log(version)

    # The systemd unit should reach the running state.
    storage.wait_for_unit('tahoe.storage.service')

    # Some while after that the Tahoe-LAFS node should listen on the web API
    # port. The port number here has to agree with the port number set in
    # the private-storage.nix module.
    storage.wait_for_open_port(3456)

    # Once the web API is listening it should be possible to scrape some
    # status from the node if it is really working.
    storage.succeed('tahoe -d /var/db/tahoe-lafs/storage status')

    # It should have Eliot logging turned on as well.
    storage.succeed('[ -e /var/db/tahoe-lafs/storage/logs/eliot.json ]')

    #
    # Storage appears to be working so try to get a client to speak with it.
    #
    runOnNode(client, [run_client, "/tmp/client", introducerFURL, issuerURL, ristrettoPublicKey])
    client.wait_for_open_port(3456)

    # Make sure the fake Stripe API server is ready for requests.
    try:
        api_stripe_com.wait_for_unit("api.stripe.com")
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # aren't intercepted for log dumping. The failure still propagates.
        _, output = api_stripe_com.execute('journalctl -u api.stripe.com')
        api_stripe_com.log(output)
        raise

    # Get some ZKAPs from the issuer.
    try:
        runOnNode(client, [
            get_passes,
            "http://127.0.0.1:3456",
            "/tmp/client/private/api_auth_token",
            issuerURL,
            voucher,
        ])
    except Exception:
        # Dump the fake Stripe API server logs, too, since the error may arise
        # from a PaymentServer/Stripe interaction.
        _, output = api_stripe_com.execute('journalctl -u api.stripe.com')
        api_stripe_com.log(output)
        raise

    # The client should be prepped now. Make it try to use some storage.
    runOnNode(client, [exercise_storage, "/tmp/client"])

    # It should be possible to restart the storage service without the
    # storage node fURL changing.
    furlfile = '/var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v1.furl'
    before = storage.execute('cat ' + furlfile)
    runOnNode(storage, ["systemctl", "restart", "tahoe.storage"])
    after = storage.execute('cat ' + furlfile)
    if before != after:
        raise Exception('fURL changes after storage node restart')

    # The client should actually still work, too.
    runOnNode(client, [exercise_storage, "/tmp/client"])

    # The issuer metrics should be accessible from the monitoring network.
    issuer.execute('ifconfig lo:fauxvpn 172.23.23.2/24')
    issuer.wait_until_succeeds("nc -z 172.23.23.2 80")
    issuer.succeed('curl --silent --insecure --fail --output /dev/null http://172.23.23.2/metrics')

    # The issuer metrics should NOT be accessible from any other network.
    issuer.fail('curl --silent --insecure --fail --output /dev/null http://localhost/metrics')
    client.fail('curl --silent --insecure --fail --output /dev/null http://issuer/metrics')
    issuer.execute('ifconfig lo:fauxvpn down')