Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision

Target

Select target project
  • tomprince/PrivateStorageio
  • privatestorage/PrivateStorageio
2 results
Select Git revision
Show changes
Showing
with 1102 additions and 313 deletions
{ lib, ... }: {
{ name, lib, ... }: {
imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];
# amazon-image.nix isn't quite aware of nvme-attached storage so give it a
......@@ -6,10 +6,48 @@
boot.loader.grub.device = lib.mkForce "/dev/nvme0n1";
ec2.hvm = true;
boot.kernel.sysctl = { "vm.swappiness" = 0; };
boot.kernel.sysctl = { "vm.swappiness" = 1; };
swapDevices = [ {
device = "/var/swapfile";
size = 8192; # megabytes
size = 1024; # megabytes
randomEncryption = true;
} ];
# If we don't manually and explicitly early-load the loop module, crypt-swap
# setup fails with the not very helpful message: "loop device with autoclear
# flag is required"
# See https://unix.stackexchange.com/a/554500/81275
boot.kernelModules = [ "loop" ];
# NixOS likes to fill up boot partitions with (by default) 100 old kernels.
# Keep a (for us) more reasonable number around.
boot.loader.grub.configurationLimit = 8;
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
# Most of the time, we have ample free & usable memory, but when upgrading
# software, we sometimes run out because of Nix. This is supposed to help:
zramSwap.enable = true;
}
# NixOS module: hardware/bootstrap configuration for a QEMU guest whose root
# filesystem is /dev/sda1 and which swaps to an encrypted swap file.
# (Content is Nix, despite any metadata hint to the contrary.)
{ modulesPath, name, lib, ... }: {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
# GRUB is installed to the whole-disk device, not a partition.
boot.loader.grub.device = "/dev/sda";
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" ];
boot.initrd.kernelModules = [ "nvme" ];
# Keep swapping to a minimum; the swap file below uses randomEncryption.
boot.kernel.sysctl = { "vm.swappiness" = 1; };
fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; };
swapDevices = [ {
device = "/var/swapfile";
size = 4096; # megabytes
randomEncryption = true;
} ];
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
}
# NixOS module: hardware/bootstrap configuration for a QEMU guest that boots
# from /dev/sda, with root on /dev/sda3 and a dedicated swap partition sda2.
{ modulesPath, name, lib, ... }: {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.loader.grub.device = "/dev/sda";
# Keep swapping to a minimum.
boot.kernel.sysctl = { "vm.swappiness" = 1; };
fileSystems."/" = { device = "/dev/sda3"; fsType = "ext4"; };
swapDevices = [ {
# Dedicated swap partition; randomEncryption encrypts it rather than
# leaving plaintext pages on disk.
device = "/dev/sda2";
randomEncryption = true;
} ];
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
}
# This is all of the static NixOS system configuration necessary to specify an
# "issuer"-type system. The configuration has various holes in it which must
# be filled somehow. These holes correspond to configuration which is not
# statically known. This value is suitable for use as a module to be imported
# into a more complete system configuration. It is expected that the holes
# will be filled by a sibling module created by ``customize-issuer.nix``.
rec {
# This contains all of the NixOS system configuration necessary to specify an
# "issuer"-type system.
{ lib, config, ...}:
let
inherit (config.grid) privateKeyPath;
inherit (config.grid.issuer) issuerDomains allowedChargeOrigins tokensPerVoucher;
in {
imports = [
./monitoringvpn-client.nix
];
options.grid.issuer = {
issuerDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the domain names that point at this issuer
system. These will all be included in Let's Encrypt certificate.
'';
};
allowedChargeOrigins = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the CORS Origins which the issuer will be
configured to allow.
'';
};
tokensPerVoucher = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.int;
example = 50000;
description = ''
If not null, a value to pass to PaymentServer for
``--tokens-per-voucher``.
'';
};
};
config = {
deployment = {
secrets = {
"ristretto-signing-key" = {
destination = "/run/keys/ristretto.signing-key";
owner.user = "root";
owner.group = "root";
source = "${privateKeyPath}/ristretto.signing-key";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
"stripe-secret-key" = {
destination = "/run/keys/stripe.secret-key";
owner.user = "root";
owner.group = "root";
source = "${privateKeyPath}/stripe.secret";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
"monitoringvpn-secret-key" = {
destination = "/run/keys/monitoringvpn/client.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
"stripe-webhook-secret-key" = {
destination = "/run/keys/stripe.webhook-secret-key";
source = "${privateKeyPath}/stripe.webhook-secret";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
};
};
imports = [
# Allow us to remotely trigger updates to this system.
../../nixos/modules/deployment.nix
../../nixos/modules/issuer.nix
../../nixos/modules/monitoring/vpn/client.nix
../../nixos/modules/monitoring/exporters/node.nix
];
services.private-storage-issuer = {
enable = true;
tls = true;
ristrettoSigningKeyPath = deployment.secrets.ristretto-signing-key.destination;
stripeSecretKeyPath = deployment.secrets.stripe-secret-key.destination;
ristrettoSigningKeyPath = config.deployment.secrets.ristretto-signing-key.destination;
stripeSecretKeyPath = config.deployment.secrets.stripe-secret-key.destination;
stripeWebhookSecretKeyPath = config.deployment.secrets.stripe-webhook-secret-key.destination;
database = "SQLite3";
databasePath = "/var/db/vouchers.sqlite3";
databasePath = "${config.fileSystems."zkapissuer-data".mountPoint}/vouchers.sqlite3";
inherit (config.grid) letsEncryptAdminEmail;
inherit allowedChargeOrigins;
domains = issuerDomains;
inherit tokensPerVoucher;
};
services.private-storage.monitoring.exporters.node.enable = true;
system.stateVersion = "19.03";
};
}
# Similar to ``issuer.nix`` but for a "monitoring"-type system. Holes are
# filled by ``customize-monitoring.nix``.
rec {
deployment = {
secrets = {
# This contains all of the NixOS system configuration necessary to specify an
# "monitoring"-type system.
{ lib, config, nodes, ...}:
let
cfg = config.grid.monitoring;
inherit (config.grid) publicKeyPath privateKeyPath monitoringvpnIPv4 letsEncryptAdminEmail;
# This collects information about monitored hosts from their configuration for use below.
monitoringHosts = lib.mapAttrsToList (name: node: rec {
inherit name;
vpnIPv4 = node.config.grid.monitoringvpnIPv4;
vpnHostName = "${name}.monitoringvpn";
hostNames = [name vpnHostName];
}) nodes;
# A set mapping VPN IP addresses as strings to lists of hostnames as
# strings. The system's ``/etc/hosts`` will be populated with this
# information. Apart from helping with normal forward resolution, this
# *also* gives us reverse resolution from the VPN IPs to hostnames which
# allows Grafana to show us hostnames instead of VPN IP addresses.
hostsMap = lib.listToAttrs (map (node: lib.nameValuePair node.vpnIPv4 node.hostNames) monitoringHosts);
# A list of VPN IP addresses as strings indicating which clients will be
# allowed onto the VPN.
vpnClientIPs = lib.remove monitoringvpnIPv4 (map (node: node.vpnIPv4) monitoringHosts);
# A list of VPN clients (IP addresses or hostnames) as strings indicating
# which nodes to scrape "nodeExporter" metrics from.
nodeExporterTargets = map (node: node.vpnHostName) monitoringHosts;
in {
imports = [
../../nixos/modules/monitoring/vpn/server.nix
../../nixos/modules/monitoring/server/grafana.nix
../../nixos/modules/monitoring/server/prometheus.nix
../../nixos/modules/monitoring/server/loki.nix
../../nixos/modules/monitoring/exporters/blackbox.nix
];
options.grid.monitoring = {
paymentExporterTargets = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of VPN clients (IP addresses or hostnames) as strings indicating
which nodes to scrape PaymentServer metrics from.
'';
};
blackboxExporterHttpsTargets = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of HTTPS servers (URLs, IP addresses or hostnames) as strings indicating
which nodes the BlackboxExporter should scrape HTTP and TLS metrics from.
'';
};
monitoringDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the domain names that point at this monitoring
system. These will all be included in Let's Encrypt certificate.
'';
};
googleOAuthClientID = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
A string containing the GSuite OAuth2 ClientID to use to authenticate
logins to Grafana.
'';
};
enableSlackAlert = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to enable alerting via Slack.
When true requires a grafana-slack-url file (see private-keys/README.rst).
'';
};
enableZulipAlert = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to enable alerting via Zulip.
When true requires a grafana-zulip-url file (see private-keys/README.rst).
'';
};
};
config = {
assertions = [
{
assertion = let
vpnIPs = (map (node: node.vpnIPv4) monitoringHosts);
in vpnIPs == lib.unique vpnIPs;
message = ''
Duplicate grid.monitoringvpnIPv4 values specified for different nodes.
'';
}
];
deployment.secrets = lib.mkMerge [
{
"monitoringvpn-private-key" = {
destination = "/run/keys/monitoringvpn/server.key";
source = "${privateKeyPath}/monitoringvpn/server.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
......@@ -12,29 +112,74 @@ rec {
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
source = "${privateKeyPath}/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"grafana-admin-password" = {
source = "${privateKeyPath}/grafana-admin.password";
destination = "/run/keys/grafana-admin.password";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
}
(lib.mkIf (cfg.googleOAuthClientID != "") {
"grafana-google-sso-secret" = {
source = "${privateKeyPath}/grafana-google-sso.secret";
destination = "/run/keys/grafana-google-sso.secret";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
(lib.mkIf cfg.enableSlackAlert {
"grafana-slack-url" = {
source = "${privateKeyPath}/grafana-slack-url";
destination = "/run/keys/grafana-slack-url";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
(lib.mkIf cfg.enableZulipAlert {
"grafana-zulip-url" = {
source = "${privateKeyPath}/grafana-zulip-url";
destination = "/run/keys/grafana-zulip-url";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
];
imports = [
# Allow us to remotely trigger updates to this system.
../../nixos/modules/deployment.nix
networking.hosts = hostsMap;
../../nixos/modules/monitoring/vpn/server.nix
../../nixos/modules/monitoring/server/grafana.nix
../../nixos/modules/monitoring/server/prometheus.nix
../../nixos/modules/monitoring/exporters/node.nix
# Loki 0.3.0 from Nixpkgs 19.09 is too old and does not work:
# ../../nixos/modules/monitoring/server/loki.nix
];
services.private-storage.monitoring.vpn.server = {
enable = true;
ip = monitoringvpnIPv4;
inherit vpnClientIPs;
pubKeysPath = "${publicKeyPath}/monitoringvpn";
};
services.private-storage.monitoring.prometheus = {
inherit nodeExporterTargets;
inherit (cfg) paymentExporterTargets blackboxExporterHttpsTargets;
nginxExporterTargets = [];
};
services.private-storage.monitoring.grafana = {
domain = "monitoring.private.storage";
prometheusUrl = "http://localhost:9090/";
lokiUrl = "http://localhost:3100/";
inherit (cfg) googleOAuthClientID enableSlackAlert enableZulipAlert;
inherit letsEncryptAdminEmail;
domains = cfg.monitoringDomains;
};
services.private-storage.monitoring.exporters.node.enable = true;
};
}
# NixOS module: configure this node as a client of the monitoring VPN.
# Key material is deployed as morph secrets under /run/keys; rotating a key
# restarts the wireguard-monitoringvpn service so it takes effect.
{ lib, config, ...}:
let
inherit (config.grid) publicKeyPath privateKeyPath monitoringvpnEndpoint monitoringvpnIPv4;
in {
config = {
deployment = {
secrets = {
# Per-node private key, stored under a filename derived from this
# node's VPN IPv4 address.
"monitoringvpn-secret-key" = {
destination = "/run/keys/monitoringvpn/client.key";
source = "${privateKeyPath}/monitoringvpn/${monitoringvpnIPv4}.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
# Preshared key shared across the whole VPN.
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
source = "${privateKeyPath}/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
};
};
services.private-storage.monitoring.vpn.client = {
enable = true;
ip = monitoringvpnIPv4;
endpoint = monitoringvpnEndpoint;
endpointPublicKeyFile = "${publicKeyPath}/monitoringvpn/server.pub";
};
};
}
# Similar to ``issuer.nix`` but for a "storage"-type system. Holes are filled
# by ``customize-storage.nix``.
rec {
# This contains all of the NixOS system configuration necessary to specify an
# "storage"-type system.
{ lib, config, ...} :
let
inherit (config.grid) privateKeyPath;
in {
# Any extra NixOS modules to load on this server.
imports = [
./monitoringvpn-client.nix
./borgbackup.nix
];
options.grid.storage = {
passValue = lib.mkOption {
type = lib.types.int;
description = ''
An integer giving the value of a single pass in byte×months.
'';
};
publicStoragePort = lib.mkOption {
type = lib.types.port;
description = ''
An integer giving the port number to include in Tahoe storage service
advertisements and on which to listen for storage connections.
'';
};
};
config = {
deployment = {
secrets = {
"ristretto-signing-key" = {
destination = "/run/keys/ristretto.signing-key";
source = "${privateKeyPath}/ristretto.signing-key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
......@@ -13,41 +41,21 @@ rec {
# extract it from the tahoe-lafs nixos module somehow?
action = ["sudo" "systemctl" "restart" "tahoe.storage.service"];
};
"monitoringvpn-secret-key" = {
destination = "/run/keys/monitoringvpn/client.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
};
};
# Any extra NixOS modules to load on this server.
imports = [
# Allow us to remotely trigger updates to this system.
../../nixos/modules/deployment.nix
# Bring in our module for configuring the Tahoe-LAFS service and other
# Private Storage-specific things.
../../nixos/modules/private-storage.nix
# Connect to the monitoringvpn.
../../nixos/modules/monitoring/vpn/client.nix
# Expose base system metrics over the monitoringvpn.
../../nixos/modules/monitoring/exporters/node.nix
];
services.private-storage.monitoring.exporters.node.enable = true;
services.private-storage.monitoring.exporters.tahoe.enable = true;
services.private-storage.borgbackup.enable = lib.mkDefault true;
# Turn on the Private Storage (Tahoe-LAFS) service.
services.private-storage = {
# Yep. Turn it on.
enable = true;
# Give it the Ristretto signing key to support authorization.
ristrettoSigningKeyPath = deployment.secrets.ristretto-signing-key.destination;
ristrettoSigningKeyPath = config.deployment.secrets.ristretto-signing-key.destination;
inherit (config.grid.storage) passValue publicStoragePort;
};
};
}
# Minimal configuration that vagrant depends on
{ config, pkgs, ... }:
let
# Vagrant uses an insecure shared private key by default, but we
# don't use the authorizedKeys attribute under users because it should be
# removed on first boot and replaced with a random one. This script sets
# the correct permissions and installs the temporary key if no
# ~/.ssh/authorized_keys exists.
install-vagrant-ssh-key = pkgs.writeScriptBin "install-vagrant-ssh-key" ''
#!${pkgs.runtimeShell}
if [ ! -e ~/.ssh/authorized_keys ]; then
mkdir -m 0700 -p ~/.ssh
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
fi
'';
in
{
# Services to enable:
# Enable the OpenSSH daemon.
services.openssh.enable = true;
# Wireguard kernel module
boot.extraModulePackages = [ config.boot.kernelPackages.wireguard ];
# Enable DBus
services.dbus.enable = true;
# Replace ntpd by timesyncd
services.timesyncd.enable = true;
# Packages for Vagrant
environment.systemPackages = with pkgs; [
findutils
gnumake
iputils
jq
nettools
netcat
nfs-utils
rsync
];
users.users.root = { password = "vagrant"; };
# Creates a "vagrant" group & user with password-less sudo access
users.groups.vagrant = {
name = "vagrant";
members = [ "vagrant" ];
};
users.extraUsers.vagrant = {
isNormalUser = true;
createHome = true;
group = "vagrant";
extraGroups = [ "users" "wheel" ];
password = "vagrant";
home = "/home/vagrant";
useDefaultShell = true;
};
# One-shot-style unit that installs the insecure Vagrant key on first boot
# (the script itself is a no-op when ~/.ssh/authorized_keys already exists).
systemd.services.install-vagrant-ssh-key = {
description = "Vagrant SSH key install (if needed)";
# Bug fix: systemd has no "fs.target"; the unit that signals local
# filesystems are mounted is "local-fs.target". With the nonexistent
# name the ordering/wants were silently never applied.
after = [ "local-fs.target" ];
wants = [ "local-fs.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${install-vagrant-ssh-key}/bin/install-vagrant-ssh-key";
User = "vagrant";
# So it won't be (needlessly) restarted:
RemainAfterExit = true;
};
};
security.sudo.wheelNeedsPassword = false;
security.sudo.extraConfig =
''
Defaults:root,%wheel env_keep+=LOCALE_ARCHIVE
Defaults:root,%wheel env_keep+=NIX_PATH
Defaults:root,%wheel env_keep+=TERMINFO_DIRS
Defaults env_keep+=SSH_AUTH_SOCK
Defaults lecture = never
root ALL=(ALL) SETENV: ALL
%wheel ALL=(ALL) NOPASSWD: ALL, SETENV: ALL
'';
}
# Package-set fragment: exposes the test-writing helper library, built with
# callPackage so testing.nix receives its inputs automatically.
{ callPackage }:
{
/* A library of tools useful for writing tests with Nix.
*/
testing = callPackage ./testing.nix { };
}
# Pure-Nix IPv4/IPv6 address arithmetic helpers.
# Thank you: https://gist.github.com/petabyteboy/558ffddb9aeb24e1eab2d5d6d021b5d7
with import <nixpkgs/lib>;
rec {
  # Integer exponentiation: base ^ exponent, for exponent >= 0.
  # FIXME: add case for negative numbers
  pow = base: exponent:
    if exponent == 0
    then 1
    else fold (x: y: y * base) base (range 2 exponent);

  # Parse a hexadecimal string into an integer. Accepts an optional "0x"/"0X"
  # prefix. Generalized to be case-insensitive: input is lower-cased before
  # the digit lookup, so "0xFF" and "0xff" both parse to 255 (previously
  # upper-case digits raised a missing-attribute error).
  fromHexString = hex:
    let
      digits =
        listToAttrs (map (d: nameValuePair (toString d) d) (range 0 9))
        // { "a" = 10; "b" = 11; "c" = 12; "d" = 13; "e" = 14; "f" = 15; };
    in foldl (acc: c: 16 * acc + digits.${c}) 0 (
      stringToCharacters (removePrefix "0x" (toLower hex)));

  ipv4 = rec {
    # "a.b.c.d" -> 32-bit integer.
    decode = address:
      foldl (acc: octet: 256 * acc + octet) 0
        (map toInt (splitString "." address));

    # 32-bit integer -> "a.b.c.d".
    encode = num:
      concatStringsSep "." (
        map (shift: toString (mod (num / shift) 256))
          (reverseList (genList (i: pow 2 (i * 8)) 4)));

    # Prefix length -> netmask as a 32-bit integer, e.g. 24 -> 0xffffff00.
    netmask = prefixLength:
      (foldl (acc: _: 2 * acc + 1) 0 (range 1 prefixLength))
      * (pow 2 (32 - prefixLength));

    # Network part "a.b.c" (or similar) -> its in-addr.arpa reverse zone name.
    reverseZone = net:
      (concatStringsSep "." (reverseList (splitString "." net)))
      + ".in-addr.arpa";

    # Every address in net/prefixLength, as dotted-quad strings.
    # Bug fix: the inner call previously applied `decode` to an integer
    # offset; `decode` expects a dotted-quad string, so evaluation failed.
    # The integer must be turned back into an address with `encode`.
    eachAddress = net: prefixLength:
      genList (offset: encode (offset + decode net))
        (pow 2 (32 - prefixLength));

    # Network address (as a string) of address/prefixLength.
    networkOf = address: prefixLength:
      encode (bitAnd (decode address) (netmask prefixLength));

    # NOTE(review): `networkOf` takes two arguments but only `address` is
    # supplied here, so this compares a partially-applied function against
    # `net` and can never be true. A correct version needs a prefix length;
    # left unchanged to preserve the existing interface — TODO confirm intent.
    isInNetwork = net: address: networkOf address == net;

    /* nixos-specific stuff */
    # First configured address of `config` that lies in `net`.
    findOwnAddress = config: net:
      head (filter (isInNetwork net) (configuredAddresses config));

    # All statically configured IPv4 addresses across all interfaces.
    configuredAddresses = config:
      concatLists (mapAttrsToList (name: iface: iface.ipv4.addresses)
        config.networking.interfaces);
  };

  ipv6 = rec {
    # Expand the "::" shorthand into an explicit eight-group address string.
    expand = address:
      (replaceStrings ["::"] [
        (concatStringsSep "0" (genList (_: ":") (
          9 - (count (c: c == ":") (stringToCharacters address)))))
      ] address)
      + (if hasSuffix "::" address then "0" else "");

    # Address string -> list of eight 16-bit integers.
    decode = address:
      map fromHexString (splitString ":" (expand address));

    # List of eight 16-bit integers -> lower-case address string.
    encode = groups:
      toLower (concatStringsSep ":" (map toHexString groups));

    # Prefix length -> netmask as a list of eight 16-bit integers.
    netmask = prefixLength:
      map (base:
        if prefixLength > base + 16 then
          (pow 2 16) - 1
        else if prefixLength < base then
          0
        else
          (foldl (acc: _: 2 * acc + 1) 0 (range 1 (prefixLength - base)))
          * (pow 2 (16 - (prefixLength - base)))
      ) (genList (i: i * 16) 8);

    # Network part -> its ip6.arpa reverse zone name.
    # Bug fix: RFC 3596 requires *all* nibbles in reverse order. The old code
    # reversed only the group order (concatLists (reverseList ...)), leaving
    # nibbles inside each 16-bit group in forward order; reversing the fully
    # flattened nibble list produces the correct zone name.
    reverseZone = net:
      (concatStringsSep "." (reverseList (concatLists (
        map (g: stringToCharacters (fixedWidthString 4 "0" g))
          (splitString ":" (expand net))))))
      + ".ip6.arpa";

    # Network address (as a string) of address/prefixLength.
    networkOf = address: prefixLength:
      encode (zipListsWith bitAnd (decode address) (netmask prefixLength));

    # NOTE(review): same defect as ipv4.isInNetwork — `networkOf` is only
    # partially applied (missing prefixLength), so the comparison can never
    # be true. Left unchanged to preserve the interface — TODO confirm.
    isInNetwork = net: address: networkOf address == (expand net);

    /* nixos-specific stuff */
    # First configured address of `config` that lies in `net`.
    findOwnAddress = config: net:
      head (filter (isInNetwork net) (configuredAddresses config));

    # All statically configured IPv6 addresses across all interfaces.
    configuredAddresses = config:
      concatLists (mapAttrsToList (name: iface: iface.ipv6.addresses)
        config.networking.interfaces);
  };
}
{ ...}:
{
/* Returns a string that runs tests from the Python code at the given path.
The Python code is loaded using *execfile* and the *test* global it
defines is called with the given keyword arguments.
Type: makeTestScript :: Path -> AttrSet -> String
Example:
testScript = (makeTestScript ./test_foo.py { x = "y"; });
*/
# Note: kwargs must be JSON-serializable, since it is spliced into the
# generated Python source via builtins.toJSON below.
makeTestScript = { testpath, kwargs ? {} }:
''
# The driver runs pyflakes on this script before letting it
# run... Convince pyflakes that there is a `test` name.
def test():
    pass
with open("${testpath}") as testfile:
    exec(testfile.read(), globals())
# For simple types, JSON is compatible with Python syntax!
test(**${builtins.toJSON kwargs})
'';
}
......@@ -34,40 +34,41 @@ let
options = {
hostId = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "abcdefab";
example = "abcdefab";
description = "The 32-bit host ID of the machine, formatted as 8 hexadecimal characters.";
};
interface = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "eno0";
example = "eno0";
description = "The name of the network interface on which to configure a static address.";
};
publicIPv4 = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "192.0.2.0";
example = "192.0.2.0";
description = "The IPv4 address to statically assign to `interface`.";
};
prefixLength = lib.mkOption
{ type = lib.types.int;
example = lib.literalExample 24;
example = 24;
description = "The statically configured network's prefix length.";
};
gateway = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "192.0.2.1";
example = "192.0.2.1";
description = "The statically configured address of the network gateway.";
};
gatewayInterface = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "eno0";
example = "eno0";
description = "The name of the network interface for the default route.";
default = cfg.interface;
};
grubDeviceID = lib.mkOption
{ type = lib.types.str;
example = lib.literalExample "wwn-0x5000c500936410b9";
example = "wwn-0x5000c500936410b9";
description = "The ID of the disk on which to install grub.";
default = "nodev";
};
};
in {
......@@ -102,10 +103,11 @@ in {
# harder to deploy in the bootstrap environment.
config =
{ boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/disk/by-id/${cfg.grubDeviceID}";
boot.loader.grub.device = if cfg.grubDeviceID == "nodev" then "nodev" else "/dev/disk/by-id/${cfg.grubDeviceID}";
boot.loader.timeout = 10;
# NixOS likes to fill up boot partitions with (by default) 100 old kernels.
# Keep a (for us) more reasonable number around.
boot.loader.grub.configurationLimit = 8;
networking.firewall.enable = false;
networking.hostId = cfg.hostId;
......
These are mostly modelled on upstream nixos modules.
They are generally fairly configurable (they don't tend to hard-code paths, they can be enabled or disabled).
They don't know anything about morph (e.g. ``deployment.secrets``) or how the different grids are configured (e.g. ``grid.publicKeyPath``).
Each module here tends to define one service (or group of related services) or feature.
Eventually, all of these will be imported automatically and controlled by ``services.private-storage.*.enabled`` options.
# Aggregation module: pulls in every service module in this directory so that
# all of their options are declared on every node (see rationale below).
{
# Load modules that are sometimes universally useful and other times useful
# only for a specific service. Where functionality is not universally
# useful, it needs to be enabled by a node's configuration. By loading more
# modules (and therefore defining more options) than is strictly necessary
# for any single node the logic for supplying conditional configuration
# elsewhere is much simplified. For example, a Vagrant module can
# unconditionally set up a filesystem for PaymentServer. If PaymentServer
# is running on that node then it will get a Vagrant-appropriate
# configuration. If PaymentServer hasn't been enabled then the
# configuration will just be ignored.
imports = [
./packages.nix
./issuer.nix
./private-storage.nix
./monitoring/policy.nix
./monitoring/vpn/client.nix
./monitoring/exporters/node.nix
./monitoring/exporters/tahoe.nix
./monitoring/exporters/promtail.nix
];
}
......@@ -11,18 +11,12 @@ let
# `restrict` means "disable all the things" then `command` means "but
# enable running this one command" (the client does not have to supply the
# command; if they authenticate, this is the command that will run).
# environment lets us pass an environment variable into the process
# started by the given command. It only works because we configured our
# sshd to allow this particular variable through. By passing this value,
# we can pin nixpkgs in the executed command to the same version
# configured for use here. It might be better if we just had a channel
# the system could be configured with ... but we don't at the moment.
"restrict,environment=\"NIXPKGS_FOR_MORPH=${pkgs.path}\",command=\"${command} ${gridName}\" ${authorizedKey}";
"restrict,command=\"${command} ${gridName}\" ${authorizedKey}";
in {
options = {
services.private-storage.deployment.authorizedKey = lib.mkOption {
type = lib.types.str;
example = lib.literalExample ''
example = ''
ssh-ed25519 AAAAC3N...
'';
description = ''
......@@ -31,7 +25,7 @@ in {
};
services.private-storage.deployment.gridName = lib.mkOption {
type = lib.types.str;
example = lib.literalExample "staging";
example = "staging";
description = ''
The name of the grid configuration to use to update this deployment.
'';
......@@ -41,19 +35,15 @@ in {
config = {
# Configure the system to use our binary cache so that deployment updates
# only require downloading pre-built software, not building it ourselves.
nix = {
binaryCachePublicKeys = [
nix.settings = {
trusted-public-keys = [
"saxtons.private.storage:MplOcEH8G/6mRlhlKkbA8GdeFR3dhCFsSszrspE/ZwY="
];
binaryCaches = [
substituters = [
"http://saxtons.private.storage"
];
};
services.openssh.extraConfig = ''
PermitUserEnvironment=NIXPKGS_FOR_MORPH
'';
# Create a one-time service that will set up an ssh key that allows the
# deployment user to authorize as root to perform the system update with
# `morph deploy`.
......@@ -104,13 +94,11 @@ in {
# Configure the deployment user.
users.users.deployment = {
# Without some shell no login is possible at all, even to execute our
# single non-restricted command.
useDefaultShell = true;
# Without a home directory, lots of tools break.
createHome = true;
home = "/home/deployment";
# A user must be either normal or system. A normal user uses the
# default shell, has a home directory created for it at the usual
# location, and is in the "users" group. That's pretty much what we
# want for the deployment user.
isNormalUser = true;
packages = [
# update-deployment dependencies
......
# A NixOS module which can run a Ristretto-based issuer for PrivateStorage
# ZKAPs.
{ lib, pkgs, config, ... }: let
{ lib, pkgs, ourpkgs, config, ... }: let
cfg = config.services.private-storage-issuer;
in {
imports = [
# Give it a good SSH configuration.
../../nixos/modules/ssh.nix
];
options = {
services.private-storage-issuer.enable = lib.mkEnableOption "PrivateStorage ZKAP Issuer Service";
services.private-storage-issuer.package = lib.mkOption {
default = pkgs.zkapissuer.components.exes."PaymentServer-exe";
default = ourpkgs.zkapissuer;
type = lib.types.package;
example = lib.literalExample "pkgs.zkapissuer.components.exes.\"PaymentServer-exe\"";
example = lib.literalExpression "pkgs.zkapissuer.components.exes.\"PaymentServer-exe\"";
description = ''
The package to use for the ZKAP issuer.
'';
};
services.private-storage-issuer.domains = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = lib.literalExample [ "payments.example.com" ];
example = [ "payments.example.com" ];
description = ''
The domain names at which the issuer is reachable.
'';
......@@ -37,12 +32,21 @@ in {
services.private-storage-issuer.issuer = lib.mkOption {
default = "Ristretto";
type = lib.types.enum [ "Trivial" "Ristretto" ];
example = lib.literalExample "Trivial";
example = "Trivial";
description = ''
The issuer algorithm to use. Either Trivial for a fake no-crypto
algorithm or Ristretto for Ristretto-flavored PrivacyPass.
'';
};
services.private-storage-issuer.tokensPerVoucher = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.int;
example = 50000;
description = ''
If not null, a value to pass to PaymentServer for
``--tokens-per-voucher``.
'';
};
services.private-storage-issuer.ristrettoSigningKeyPath = lib.mkOption {
default = null;
type = lib.types.path;
......@@ -58,6 +62,13 @@ in {
and payment management.
'';
};
services.private-storage-issuer.stripeWebhookSecretKeyPath = lib.mkOption {
type = lib.types.path;
description = ''
The path to a file containing a Stripe "webhook" secret key to use for
charge and payment management.
'';
};
services.private-storage-issuer.stripeEndpointDomain = lib.mkOption {
type = lib.types.str;
description = ''
......@@ -86,6 +97,15 @@ in {
The kind of voucher database to use.
'';
};
services.private-storage-issuer.databaseFileSystem = lib.mkOption {
# Logically, the type is the type of an entry in fileSystems - but we'll
# just let the type system enforce that when we pass the value on to
# fileSystems.
description = ''
Configuration for a filesystem to mount which will hold the issuer's
internal state database.
'';
};
services.private-storage-issuer.databasePath = lib.mkOption {
default = null;
type = lib.types.str;
......@@ -113,37 +133,76 @@ in {
config =
let
certroot = "/var/lib/letsencrypt/live";
# We'll refer to this collection of domains by the first domain in the
# list.
domain = builtins.head cfg.domains;
certServiceName = "cert-${domain}";
# Payment server internal http port (arbitrary, non-priviledged):
internalHttpPort = "1061";
# The "-vN" suffix indicates that this Nth incompatible version of on
# disk state as managed by this deployment system. This does not have
# anything to do with what's inside the PaymentServer-managed state.
# Instead it's about things like the type of filesystem used or options
# having to do with the backing volume behind the filesystem. In
# general I expect that to get from "-vN" to "-v(N+1)" some manual
# upgrade steps will be required.
stateDirectory = "zkapissuer-v2";
in lib.mkIf cfg.enable {
# Make sure the voucher database filesystem is mounted.
fileSystems = {
"zkapissuer-data" = cfg.databaseFileSystem // {
mountPoint = "/var/lib/${stateDirectory}";
};
};
# Add a systemd service to run PaymentServer.
systemd.services.zkapissuer = {
enable = true;
description = "ZKAP Issuer";
wantedBy = [ "multi-user.target" ];
# Make sure we have a certificate the first time, if we are running over
# TLS and require a certificate.
requires = lib.optional cfg.tls "${certServiceName}.service";
after = [
# Make sure there is a network so we can bind to all of the
# interfaces.
"network.target"
] ++
# Make sure we run after the certificate is issued, if we are running
# over TLS and require a certificate.
lib.optional cfg.tls "${certServiceName}.service";
# It really shouldn't ever exit on its own! If it does, it's a bug
# we'll have to fix. Restart it and hope it doesn't happen too much
# before we can fix whatever the issue is.
serviceConfig.Restart = "always";
serviceConfig.Type = "simple";
# Run w/o privileges
serviceConfig = {
DynamicUser = false;
User = "zkapissuer";
Group = "zkapissuer";
};
# Make systemd create a User/Group owned directory for PaymentServer
# state. According to the docs at
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html#RuntimeDirectory=
# "The specified directory names must be relative" ... this makes
# systemd create this directory in /var/lib/ for us.
serviceConfig.StateDirectory = stateDirectory;
serviceConfig.StateDirectoryMode = "0750";
unitConfig.AssertPathExists = [
# Bail if there is still an old (root-owned) DB file on this system.
# If you hit this, and this /var/db/ file is indeed current, move it
# to /var/lib/zkapissuer/vouchers.sqlite3 and chown it to
# zkapissuer:zkapissuer.
"!/var/db/vouchers.sqlite3"
# Similarly, bail if the newer path you were just told to create --
# /var/lib/zkapissuer/vouchers.sqlite3 -- exists. It needs to be
# moved /var/lib/zkapissuer-v2 where a dedicated filesystem has been
# created for it.
"!/var/lib/zkapissuer/vouchers.sqlite3"
];
# Only start if the dedicated vouchers database filesystem is mounted so
# that we know we're going to find our vouchers database there (or that
# we will create it in the right place).
unitConfig.Requires = ["local-fs.target"];
unitConfig.After = ["local-fs.target"];
script =
let
# Compute the right command line arguments to pass to it. The
......@@ -156,16 +215,7 @@ in {
if cfg.database == "Memory"
then "--database Memory"
else "--database SQLite3 --database-path ${cfg.databasePath}";
httpsArgs =
if cfg.tls
then
"--https-port 443 " +
"--https-certificate-path ${certroot}/${domain}/cert.pem " +
"--https-certificate-chain-path ${certroot}/${domain}/chain.pem " +
"--https-key-path ${certroot}/${domain}/privkey.pem"
else
# Only for automated testing.
"--http-port 80";
httpArgs = "--http-port ${internalHttpPort}";
prefixOption = s: "--cors-origin=" + s;
originStrings = map prefixOption cfg.allowedChargeOrigins;
......@@ -173,36 +223,84 @@ in {
stripeArgs =
"--stripe-key-path ${cfg.stripeSecretKeyPath} " +
"--stripe-webhook-key-path ${cfg.stripeWebhookSecretKeyPath} " +
"--stripe-endpoint-domain ${cfg.stripeEndpointDomain} " +
"--stripe-endpoint-scheme ${cfg.stripeEndpointScheme} " +
"--stripe-endpoint-port ${toString cfg.stripeEndpointPort}";
in
"${cfg.package}/bin/PaymentServer-exe ${originArgs} ${issuerArgs} ${databaseArgs} ${httpsArgs} ${stripeArgs}";
};
# Certificate renewal. We must declare that we *require* it in our
# service above.
systemd.services."${certServiceName}" = {
enable = true;
description = "Certificate ${domain}";
serviceConfig = {
ExecStart =
let
configArgs = "--config-dir /var/lib/letsencrypt --work-dir /var/run/letsencrypt --logs-dir /var/run/log/letsencrypt";
redemptionConfig = lib.optionalString (cfg.tokensPerVoucher != null) "--tokens-per-voucher ${builtins.toString cfg.tokensPerVoucher}";
in
pkgs.writeScript "cert-${domain}-start.sh" ''
#!${pkgs.runtimeShell} -e
# Register if necessary.
${pkgs.certbot}/bin/certbot register ${configArgs} --non-interactive --agree-tos -m ${cfg.letsEncryptAdminEmail} || true
# Obtain the certificate.
${pkgs.certbot}/bin/certbot certonly ${configArgs} --non-interactive --standalone --expand --domains ${builtins.concatStringsSep "," cfg.domains}
'';
"${cfg.package.exePath} ${originArgs} ${issuerArgs} ${databaseArgs} ${httpArgs} ${stripeArgs} ${redemptionConfig}";
};
# PaymentServer runs as this user and group by default
# Mind the comments in nixpkgs/nixos/modules/misc/ids.nix: "When adding a uid,
# make sure it doesn't match an existing gid. And don't use uids above 399!"
ids.uids.zkapissuer = 397;
ids.gids.zkapissuer = 397;
users.extraGroups.zkapissuer.gid = config.ids.gids.zkapissuer;
users.extraUsers.zkapissuer = {
uid = config.ids.uids.zkapissuer;
isNormalUser = false;
group = "zkapissuer";
# Let PaymentServer read from keys, if necessary.
extraGroups = [ "keys" ];
};
# Open 80 and 443 for the certbot HTTP server and the PaymentServer HTTPS server.
# Open 80 and 443 for nginx
networking.firewall.allowedTCPPorts = [
80
443
];
# NGINX reverse proxy
security.acme.defaults.email = cfg.letsEncryptAdminEmail;
security.acme.acceptTerms = true;
services.nginx = {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
virtualHosts."${domain}" = {
serverAliases = builtins.tail cfg.domains;
enableACME = cfg.tls;
forceSSL = cfg.tls;
locations."/v1/" = {
# Only forward requests beginning with /v1/ so
# we pass less scanning spam on to our backend
# Want a regex instead? try locations."~ /v\d+/"
proxyPass = "http://127.0.0.1:${internalHttpPort}";
# The redemption endpoint can intentionally delay its response for
# up to 600 seconds for a cheap kind of server-push when payment
# completes. Let that timeout control how long the connection stays
# open. PaymentServer does not accept configuration for that
# duration so we also hard-code it here.
#
# http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout
extraConfig = ''
proxy_read_timeout 660;
'';
};
locations."/metrics" = {
# Only allow our monitoringvpn subnet
extraConfig = ''
allow 172.23.23.0/24;
deny all;
'';
proxyPass = "http://127.0.0.1:${internalHttpPort}";
};
locations."/" = {
# Return a 404 error for any paths not specified above.
extraConfig = ''
return 404;
'';
};
};
};
};
}
# Prometheus Blackbox exporter configuration
#
# Scope: From the monitoring machine, ping (etc.) hosts to check whether
#        they are reachable, certs still are valid for a while, etc.
#
# Notes: The Blackbox exporter follows the "Multi Target Exporter" pattern,
#        see https://prometheus.io/docs/guides/multi-target-exporter/ .
#
# Usage: Import this on a monitoring server
{ config, lib, pkgs, ... }:
let
  # Probe definitions handed to the exporter.  Each attribute names one
  # "module" that Prometheus can select per scrape target.
  proberModules = {
    https_2xx = {
      prober = "http";
      timeout = "5s";
      http = {
        fail_if_not_ssl = true;
        # This prober is for IPv4 only.
        preferred_ip_protocol = "ip4";
        ip_protocol_fallback = false;
      };
    };
  };
in {
  config.services.prometheus.exporters.blackbox = {
    enable = true;
    # The exporter wants YAML; JSON is a subset of YAML, so serializing the
    # Nix attrset with toJSON produces a valid configuration file.
    configFile = pkgs.writeText "blackbox-exporter.yaml"
      (builtins.toJSON { modules = proberModules; });
  };
}
# MegaCli to Prometheus text format exporter
#
# Scope: Gets data from MegaRAID compatible storage controllers and mogrifies
#        to Prometheus text format, saves to a temp file, to later be scraped
#        by the node exporter.
#
# Usage: Import this to every server with a MegaRAID card that you want to
#        include in the central monitoring system
#
# See https://nixos.org/manual/nixos/stable/#module-services-prometheus-exporters
{ config, options, lib, ourpkgs, pkgs, ... }:
let
  # Shorthand for this module's own option subtree.
  exporterCfg = config.services.private-storage.monitoring.exporters.megacli2prom;
in {
  options.services.private-storage.monitoring.exporters.megacli2prom = {
    enable = lib.mkEnableOption "MegaCli2Prom metrics gathering service";

    outFile = lib.mkOption {
      type = lib.types.str;
      description = "Where to store the temporary file for node exporter to scrape?";
      default = "/run/prometheus-node-exporter/megacli.prom";
    };

    interval = lib.mkOption {
      type = lib.types.str;
      description = ''
        How often to do it?
        See https://www.freedesktop.org/software/systemd/man/systemd.time.html#Calendar%20Events
      '';
      # Every five minutes.
      default = "*:0/5";
    };
  };

  config = lib.mkIf exporterCfg.enable {
    # Make both the collector and the underlying megacli tool available
    # interactively as well.
    environment.systemPackages = [ ourpkgs.megacli2prom pkgs.megacli ];

    systemd.services.megacli2prom = {
      enable = true;
      description = "MegaCli2Prom metrics gathering service";
      startAt = exporterCfg.interval;
      path = [ pkgs.megacli ];

      # Save to a temp file and then move atomically so the
      # textfile collector won't read a partial file.
      # See https://github.com/prometheus/node_exporter#textfile-collector
      script = ''
        "${ourpkgs.megacli2prom}/bin/megacli2prom" > "${exporterCfg.outFile}.tmp"
        mv "${exporterCfg.outFile}.tmp" "${exporterCfg.outFile}"
      '';
    };
  };
}
......@@ -6,15 +6,25 @@
# monitoring system
# See https://nixos.org/manual/nixos/stable/#module-services-prometheus-exporters
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, options, ... }:
with lib;
let
cfg = config.services.private-storage.monitoring.exporters.node;
mountsFileSystemType = fsType: {} != filterAttrs (n: v: v.fsType == fsType) config.fileSystems;
in {
config.services.prometheus.exporters.node = {
options.services.private-storage.monitoring.exporters.node = {
enable = lib.mkEnableOption "Base system metrics collection";
textfiles-directory = lib.mkOption {
type = lib.types.str;
description = "Directory used by the textfiles collector.";
default = "/run/prometheus-node-exporter";
};
};
config.services.prometheus.exporters.node = lib.mkIf cfg.enable {
enable = true;
openFirewall = true;
firewallFilter = "-i monitoringvpn -p tcp -m tcp --dport 9100";
......@@ -22,7 +32,7 @@ in {
# extraFlags = [ "--collector.disable-defaults" ]; # not in nixpkgs 19.09
# Thanks https://github.com/mayflower/nixexprs/blob/master/modules/monitoring/default.nix
enabledCollectors = [
"arp"
# "arp" # is broken in 1.7.0 (2024-02-07)
"bcache"
"conntrack"
"filefd"
......@@ -30,16 +40,16 @@ in {
"netclass"
"netdev"
"netstat"
#"rapl" # not in nixpkgs 19.09
"rapl"
"sockstat"
#"softnet" # not in nixpkgs 19.09
"softnet"
"stat"
"systemd"
# "textfile"
# "textfile.directory /run/prometheus-node-exporter"
#"thermal_zone" # not in nixpkgs 19.09
"textfile"
"textfile.directory ${cfg.textfiles-directory}"
"thermal_zone"
"time"
#"udp_queues" # not in nixpkgs 19.09
"udp_queues"
"uname"
"vmstat"
] ++ optionals (!config.boot.isContainer) [
......@@ -59,7 +69,7 @@ in {
] ++ (
optionals (config.services.nfs.server.enable) [ "nfsd" ]
) ++ (
optionals ("" != config.boot.initrd.mdadmConf) [ "mdadm" ]
optionals ("" != config.boot.swraid.mdadmConf) [ "mdadm" ]
) ++ (
optionals ({} != config.networking.bonds) [ "bonding" ]
) ++ (
......@@ -67,7 +77,7 @@ in {
) ++ (
optionals (mountsFileSystemType "xfs") [ "xfs" ]
) ++ (
optionals (mountsFileSystemType "zfs" || elem "zfs" config.boot.supportedFilesystems) [ "zfs" ]
optionals (mountsFileSystemType "zfs" || config.boot.supportedFilesystems.zfs or false) [ "zfs" ]
);
};
}
......
This diff is collapsed.