Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 668 additions and 350 deletions
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
# NixOS configuration specific to this node
{ config, lib, pkgs, ... }:
{
@@ -12,7 +10,7 @@
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
boot.kernel.sysctl = { "vm.swappiness" = 0; };
boot.kernel.sysctl = { "vm.swappiness" = 1; };
fileSystems."/" =
{ device = "/dev/disk/by-uuid/2653c6bb-396f-4911-b9ff-b68de8f9715d";
@@ -37,6 +35,6 @@
randomEncryption = true;
} ];
nix.maxJobs = lib.mkDefault 32;
nix.settings.max-jobs = lib.mkDefault 32;
powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
}
@@ -8,11 +8,14 @@
"payments.privatestorage-staging.com"
, "payments.extra.privatestorage-staging.com"
]
, "monitoringDomains": [
"monitoring.privatestorage-staging.com"
, "monitoring.extra.privatestorage-staging.com"
]
, "letsEncryptAdminEmail": "jean-paul@privatestorage.io"
, "allowedChargeOrigins": [
"http://localhost:5000"
, "https://privatestorage-staging.com"
, "https://www.privatestorage-staging.com"
]
, "monitoringGoogleOAuthClientID": "802959152038-6esn1c6u2lm3j82lf29jvmn8s63hi8dc.apps.googleusercontent.com"
}
# See morph/grid/local/grid.nix for additional commentary.
let
pkgs = import <nixpkgs> { };
gridlib = import ../../lib;
grid-config = pkgs.lib.trivial.importJSON ./config.json;
grid-config = builtins.fromJSON (builtins.readFile ./config.json);
# Module with per-grid configuration
grid-module = {config, ...}: {
@@ -21,6 +19,7 @@ let
grid = {
publicKeyPath = toString ./. + "/${grid-config.publicKeyPath}";
privateKeyPath = toString ./. + "/${grid-config.privateKeyPath}";
inherit (grid-config) monitoringvpnEndpoint letsEncryptAdminEmail;
};
# Configure deployment management authorization for all systems in the grid.
services.private-storage.deployment = {
@@ -33,11 +32,14 @@ let
imports = [
gridlib.issuer
gridlib.hardware-aws
(gridlib.customize-issuer (grid-config // {
monitoringvpnIPv4 = "172.23.23.11";
}))
grid-module
];
config = {
grid.monitoringvpnIPv4 = "172.23.23.11";
grid.issuer = {
inherit (grid-config) issuerDomains allowedChargeOrigins;
};
};
};
storage001 = {
@@ -45,42 +47,52 @@ let
gridlib.storage
gridlib.hardware-aws
./testing001-hardware.nix
(gridlib.customize-storage (grid-config // {
monitoringvpnIPv4 = "172.23.23.12";
stateVersion = "19.03";
}))
grid-module
];
config = {
grid.monitoringvpnIPv4 = "172.23.23.12";
grid.storage = {
inherit (grid-config) passValue publicStoragePort;
};
system.stateVersion = "19.03";
};
};
monitoring = {
imports = [
gridlib.monitoring
gridlib.hardware-aws
(gridlib.customize-monitoring {
inherit hostsMap vpnClientIPs nodeExporterTargets paymentExporterTargets;
inherit (grid-config) letsEncryptAdminEmail;
googleOAuthClientID = grid-config.monitoringGoogleOAuthClientID;
monitoringvpnIPv4 = "172.23.23.1";
stateVersion = "19.09";
})
grid-module
];
config = {
grid.monitoringvpnIPv4 = "172.23.23.1";
grid.monitoring = {
inherit paymentExporterTargets blackboxExporterHttpsTargets;
inherit (grid-config) monitoringDomains;
googleOAuthClientID = grid-config.monitoringGoogleOAuthClientID;
enableZulipAlert = true;
};
system.stateVersion = "19.09";
};
};
# TBD: derive these automatically:
hostsMap = {
"172.23.23.1" = [ "monitoring" "monitoring.monitoringvpn" ];
"172.23.23.11" = [ "payments" "payments.monitoringvpn" ];
"172.23.23.12" = [ "storage001" "storage001.monitoringvpn" ];
};
vpnClientIPs = [ "172.23.23.11" "172.23.23.12" ];
nodeExporterTargets = [ "monitoring" "payments" "storage001" ];
paymentExporterTargets = [ "payments" ];
paymentExporterTargets = [ "payments.monitoringvpn" ];
blackboxExporterHttpsTargets = [
"https://privatestorage-staging.com/"
"https://www.privatestorage-staging.com/"
"https://extra.privatestorage-staging.com/"
"https://www.extra.privatestorage-staging.com/"
"https://payments.privatestorage-staging.com/"
"https://payments.extra.privatestorage-staging.com/"
"https://monitoring.privatestorage-staging.com/"
"https://monitoring.extra.privatestorage-staging.com/"
];
in {
network = {
description = "PrivateStorage.io Testing Grid";
inherit (gridlib) pkgs;
};
inherit payments monitoring storage001;
}
p2kt6691@p2kt6691.repo.borgbase.com:repo
let
jcalderone = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN4GenAY/YLGuf1WoMXyyVa3S9i4JLQ0AG+pt7nvcLlQ exarkun@baryon";
flo = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHx7wJQNqKn8jOC4AxySRL2UxidNp7uIK9ad3pMb1ifF flo@fs-la";
flo = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII78HGtpjFxQo7wol85hqfoCqjdK9Nk7+82rwttyLHpe flo@la-staging"];
bdonneaux = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGgpTXgxEqQPSl17NzJkAJgeDSFS1Ke/qjCuVMTZLlna benoit@leastauthority.com" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIZtWY7t8HVnaz6bluYsrAlzZC3MZtb8g0nO5L5fCQKR benoit@leastauthority.com"];
chris = ["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDT3/sSNoJP3E17oFTYjHN+uOwTY1wVox9tff97iueIo4V88eAc/oXKETiwPkr33qbGwuoXKkxIXJVz8rnNtO6IjTm9qfgzPiRAUJjew8bunL+V7SbhQIv7nk1fyV/efaENElG8bdmzTEpgwGcEnyibqvHJSYX6W+dMCz3G1t/lv97if3ohZKENHuMC5hLfJbGHSGKFO5XdjEjeda9lDd9Ac8XyruaL7iqEefsC7GuUgNRn8V83vwuJMDAC2xXC2V11M65VkGs6WPAct2+llzTtYbsxjxVZXC4yU42eXJYfBZEcCTPtJsKJxQCqSgFOEUnOYiuS6p4Q7a97BfHJ9S9oOV8U/e7YeE4b9Q8TPNzvKTPBAsuKyLyNYekBDB7fOTFziuJy/L578EaDv2BxrsfyCQqtjLko6TIAUbbHvce8urWNvj7H+fNXaURLIQmSTOv/mMl+omkvbP3MNgSFdENpCZaHSTiDxjygf52xcinj6Ijf3uDvPY2UjIRrbWSNV4MYpZDfkqt9THY4QibmxhER/YGvY+0zfiYGqQpQMMbTUB9hhoO5AHnnhMszNG2V9i70VqWyEMsS+Sr1+gOVAPraLp/tqHaqZk7/c4DDpILjA+4davTL6lgaiewx8a0ZEPAKZCZkOMovZKwkVIjyMvfekUkf1cF+QigJPZzcWSWEjQ== cardno:000608671823"];
last-resort = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE1hy9mPkJI+7mY2Uq6CLpuFMMLOTfiY2sRJHwpihgRt cardno:26 269 859 - Last Resort A-Key"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJPYMUVNuWr2y+FL1GxW6S6jb3BWYhbzJ2zhvQVKu2ll cardno:23 845 763 - Last Resort C-key"
];
in
{
"root" = jcalderone;
inherit jcalderone;
inherit flo;
"root" = flo ++ bdonneaux ++ chris ++ last-resort;
inherit flo bdonneaux chris last-resort;
}
@@ -18,10 +18,36 @@
corresponding private keys for the system.
'';
};
monitoringvpnIPv4 = lib.mkOption {
type = lib.types.str;
description = ''
The IPv4 address of this node on the monitoring VPN.
'';
};
monitoringvpnEndpoint = lib.mkOption {
type = lib.types.str;
description = ''
The domain name and port of the monitoring VPN endpoint.
'';
};
letsEncryptAdminEmail = lib.mkOption {
type = lib.types.str;
description = ''
A string giving an email address to use for Let's Encrypt registration and
certificate issuance.
'';
};
};
# Any extra NixOS modules to load on all our servers. Note that just
# because they're loaded doesn't *necessarily* mean they're turned on.
imports = [
../../nixos/modules/packages.nix
# Set options intended for a "small" NixOS: Do not build X and docs.
<nixpkgs/nixos/modules/profiles/minimal.nix>
# This brings in various other modules that define options for different
# areas of the service.
../../nixos/modules/default.nix
];
config = {
@@ -31,6 +57,32 @@
# being configured and using variable names complicates a lot of things).
# Instead, just tell morph how to reach the node here - by using its fully
# qualified domain name.
deployment.targetHost = "${config.networking.hostName}.${config.networking.domain}";
deployment.targetHost = config.networking.fqdn;
services.private-storage.monitoring.exporters.promtail.enable = true;
# Install no documentation on grid
# It seems 24.05 has some new defaults that aren't stripped away by the
# 'minimal' profile above.
# See https://github.com/NixOS/nixpkgs/blob/nixos-24.05/nixos/modules/misc/documentation.nix
documentation.enable = false;
documentation.man.enable = false;
documentation.info.enable = false;
documentation.doc.enable = false;
documentation.dev.enable = false;
documentation.nixos.enable = false;
assertions = [
# This is a check to save somebody in the future from trying to debug why
# setting `nixpkgs.config` is not having an effect.
{
# `{}` is the default value for `nixpkgs.config`
assertion = config.nixpkgs.config == {};
message = ''
Since we set `nixpkgs.pkgs` via morph's `network.pkgs`, the value for `nixpkgs.config` is ignored.
See https://whetstone.private.storage/privatestorage/PrivateStorageio/-/issues/85#note_15876 for details.
'';
}
];
};
}
@@ -67,7 +67,7 @@ let
# Stop! I hope you're done when you get here. If you have to modify
# anything below this point the expression should probably be refactored and
# another variable added controlling whatever new thing you need to control.
# Open an issue: https://whetstone.privatestorage.io/privatestorage/PrivateStorageio/-/issues/new?issue
# Open an issue: https://whetstone.private.storage/privatestorage/PrivateStorageio/-/issues/new?issue
in
# Define a function that ignores all its arguments. We don't need any of them
# for now.
@@ -87,7 +87,6 @@ in
# Configure the bootloader how we like.
boot.loader.timeout = 10;
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/disk/by-id/${grubDeviceID}";
# Let me in to do subsequent configuration. This makes the machine wide
...
# Importing this adds a daily borg backup job to a node.
# It has all the common config and keys, and can be configured
# to back up more (or entirely different) folders.
{ lib, config, pkgs, ...}:
let
cfg = config.services.private-storage.borgbackup;
inherit (config.grid) publicKeyPath privateKeyPath;
# Get a per-host number so backup jobs don't all run at the
# same time.
ip-util = import ../../nixos/lib/ip-util.nix;
backupDelay = with builtins; bitAnd (ip-util.fromHexString
(hashString "md5" config.networking.hostName)) 15;
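# Hedged worked example: bitAnd ... 15 keeps only the low four bits of the
# hash, i.e. its last hex digit (assuming ip-util.fromHexString parses the
# digest as ordinary hex). A digest ending in "a" gives 0xa = 10, so that
# host would start its daily backup at 10:22:11 UTC and run its monthly
# repository check on day 10 of the month (see the startAt values below).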
in {
options.services.private-storage.borgbackup = {
enable = lib.mkEnableOption "Borgbackup daily backup job";
paths = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of directories to back up using Borg.
'';
default = [ "/storage" ];
};
};
config = lib.mkIf cfg.enable {
deployment = {
secrets = {
"borgbackup-passphrase" = {
# The passphrase is used to encrypt the repo key
# https://borgbackup.readthedocs.io/en/stable/usage/init.html
destination = "/run/keys/borgbackup/passphrase";
source = "${privateKeyPath}/borgbackup.passphrase";
};
"borgbackup-appendonly-ssh-key" = {
# The ssh key is used to authenticate to the remote repo server
destination = "/run/keys/borgbackup/ssh-key";
source = "${privateKeyPath}/borgbackup.ssh-key";
};
};
};
services.borgbackup.jobs = {
daily = {
paths = cfg.paths;
repo = lib.fileContents "${publicKeyPath}/borgbackup/${config.networking.hostName}.repopath";
doInit = false;
encryption = {
mode = "repokey-blake2";
passCommand = "cat /run/keys/borgbackup/passphrase";
};
environment = {
BORG_RSH = "ssh -i /run/keys/borgbackup/ssh-key -o StrictHostKeyChecking=accept-new";
};
# Output statistics after uploading a backup set
extraCreateArgs = "--stats --json";
# All logs in JSON to help Prometheus/Grafana
extraArgs = "--log-json";
# Ciphertext doesn't compress well
compression = "none";
# Start the backup at a different time on each machine,
# and not on the full hour, but somewhat later.
startAt = "*-*-* " + toString backupDelay + ":22:11 UTC";
};
};
# Check repo once a month
systemd.services.borgbackup-check-repo = {
# Once a month, 3h after last backup started
startAt = "*-*-" + toString backupDelay + " 18:33:22 UTC";
path = [ pkgs.borgbackup ];
environment = {
BORG_PASSCOMMAND = "cat /run/keys/borgbackup/passphrase";
BORG_RSH = "ssh -i /run/keys/borgbackup/ssh-key -o StrictHostKeyChecking=accept-new";
BORG_REPO = lib.fileContents "${publicKeyPath}/borgbackup/${config.networking.hostName}.repopath";
};
script = ''${pkgs.borgbackup}/bin/borg check --verbose --log-json'';
};
};
}
# Define a function which returns a value which fills in all the holes left by
# ``issuer.nix``.
{
# A string giving the IP address and port number (":"-separated) of the VPN
# server.
monitoringvpnEndpoint
# A string giving the VPN IPv4 address for this system.
, monitoringvpnIPv4
# A string giving an email address to use for Let's Encrypt registration and
# certificate issuance.
, letsEncryptAdminEmail
# A list of strings giving the domain names that point at this issuer
# system. These will all be included in the Let's Encrypt certificate.
, issuerDomains
# A list of strings giving the CORS Origins the issuer will be configured
# to allow.
, allowedChargeOrigins
, ...
}:
{ config, ... }:
let
inherit (config.grid) publicKeyPath privateKeyPath;
in {
deployment.secrets = {
# ``.../monitoringvpn`` is a path on the deployment system of a directory
# containing a number of VPN-related secrets. This is expected to contain
# a number of files named like ``<VPN IPv4 address>.key`` containing the
# VPN private key for the corresponding host. It must also contain
# ``server.pub`` and ``preshared.key`` holding the VPN server's public key
# and the pre-shared key, respectively. All of these things are used as
# the sources of various VPN-related morph secrets.
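# For illustration only, with the VPN addresses used elsewhere in this grid,
# the directory might look like:
#
#   monitoringvpn/
#     172.23.23.11.key    # private key for the issuer/payments host
#     172.23.23.12.key    # private key for the storage host
#     server.pub
#     preshared.key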
"monitoringvpn-secret-key".source = "${privateKeyPath}/monitoringvpn/${monitoringvpnIPv4}.key";
"monitoringvpn-preshared-key".source = "${privateKeyPath}/monitoringvpn/preshared.key";
};
services.private-storage.monitoring.vpn.client = {
enable = true;
ip = monitoringvpnIPv4;
endpoint = monitoringvpnEndpoint;
endpointPublicKeyFile = "${publicKeyPath}/monitoringvpn/server.pub";
};
services.private-storage-issuer = {
inherit letsEncryptAdminEmail allowedChargeOrigins;
domains = issuerDomains;
};
system.stateVersion = "19.03";
}
# Define a function which returns a value which fills in all the holes left by
# ``monitoring.nix``.
{
# A set mapping VPN IP addresses as strings to lists of hostnames as
# strings. The system's ``/etc/hosts`` will be populated with this
# information. Apart from helping with normal forward resolution, this
# *also* gives us reverse resolution from the VPN IPs to hostnames which
# allows Grafana to show us hostnames instead of VPN IP addresses.
hostsMap
# See ``customize-issuer.nix``.
, monitoringvpnIPv4
, letsEncryptAdminEmail
# A list of VPN IP addresses as strings indicating which clients will be
# allowed onto the VPN.
, vpnClientIPs
# A list of VPN clients (IP addresses or hostnames) as strings indicating
# which nodes to scrape "nodeExporter" metrics from.
, nodeExporterTargets
# A list of VPN clients (IP addresses or hostnames) as strings indicating
# which nodes to scrape "nginxExporter" metrics from.
, nginxExporterTargets ? []
# A list of VPN clients (IP addresses or hostnames) as strings indicating
# which nodes to scrape PaymentServer metrics from.
, paymentExporterTargets ? []
# A string containing the GSuite OAuth2 ClientID to use to authenticate
# logins to Grafana.
, googleOAuthClientID
# A string giving the NixOS state version for the system.
, stateVersion
, ...
}:
{ config, ... }:
let
inherit (config.grid) publicKeyPath privateKeyPath;
in {
deployment.secrets = let
# When Grafana SSO is disabled there is not necessarily any client secret
# available. Avoid telling morph that there is one in this case (so it
# avoids trying to read it and then failing). Even if the secret did
# exist, if SSO is disabled there's no point sending the secret to the
# server.
#
# Also, we have to define this whole secret here so that we can configure
# it completely or not at all. morph gets angry if we half configure it
# (say, by just omitting the "source" value).
grafanaSSO =
if googleOAuthClientID == ""
then { }
else {
"grafana-google-sso-secret" = {
source = "${privateKeyPath}/grafana-google-sso.secret";
destination = "/run/keys/grafana-google-sso.secret";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
"grafana-admin-password" = {
source = "${privateKeyPath}/grafana-admin.password";
destination = "/run/keys/grafana-admin.password";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
};
monitoringvpn = {
"monitoringvpn-private-key".source = "${privateKeyPath}/monitoringvpn/server.key";
"monitoringvpn-preshared-key".source = "${privateKeyPath}/monitoringvpn/preshared.key";
};
in
grafanaSSO // monitoringvpn;
networking.hosts = hostsMap;
services.private-storage.monitoring.vpn.server = {
enable = true;
ip = monitoringvpnIPv4;
inherit vpnClientIPs;
pubKeysPath = "${publicKeyPath}/monitoringvpn";
};
services.private-storage.monitoring.prometheus = {
inherit nodeExporterTargets;
inherit nginxExporterTargets;
inherit paymentExporterTargets;
};
services.private-storage.monitoring.grafana = {
inherit letsEncryptAdminEmail;
inherit googleOAuthClientID;
domain = "${config.networking.hostName}.${config.networking.domain}";
};
system.stateVersion = stateVersion;
}
# Define a function which returns a value which fills in all the holes left by
# ``storage.nix``.
{
# See ``customize-issuer.nix``
monitoringvpnEndpoint
, monitoringvpnIPv4
# An integer giving the value of a single pass in byte×months.
, passValue
# An integer giving the port number to include in Tahoe storage service
# advertisements and on which to listen for storage connections.
, publicStoragePort
# A string giving the NixOS state version for the system.
, stateVersion
, ...
}:
{ config, ... }:
let
inherit (config.grid) publicKeyPath privateKeyPath;
in {
deployment.secrets = {
"monitoringvpn-secret-key".source = "${privateKeyPath}/monitoringvpn/${monitoringvpnIPv4}.key";
"monitoringvpn-preshared-key".source = "${privateKeyPath}/monitoringvpn/preshared.key";
};
services.private-storage = {
inherit passValue publicStoragePort;
};
services.private-storage.monitoring.vpn.client = {
enable = true;
ip = monitoringvpnIPv4;
endpoint = monitoringvpnEndpoint;
endpointPublicKeyFile = "${publicKeyPath}/monitoringvpn/server.pub";
};
system.stateVersion = stateVersion;
}
@@ -5,16 +5,37 @@
base = import ./base.nix;
hardware-aws = import ./issuer-aws.nix;
hardware-virtual = import ./hardware-virtual.nix;
hardware-vagrant = import ./hardware-vagrant.nix;
hardware-monitoring-ovh = import ./issuer-monitoring-ovh.nix;
hardware-payments-ovh = import ./issuer-payments-ovh.nix;
issuer = import ./issuer.nix;
customize-issuer = import ./customize-issuer.nix;
storage = import ./storage.nix;
customize-storage = import ./customize-storage.nix;
monitoring = import ./monitoring.nix;
customize-monitoring = import ./customize-monitoring.nix;
borgbackup = import ./borgbackup.nix;
modules = builtins.toString ../../nixos/modules;
# The nixpkgs version used in our deployments. This affects both the packages
# installed and the NixOS module set that is used.
# This is intended to be used in a grid definition like:
# network = { ... ; inherit (gridlib) pkgs; ... }
pkgs = import ../../nixpkgs.nix {
# Ensure that configuration of the system where this runs
# doesn't leak into what we build.
# See https://github.com/NixOS/nixpkgs/issues/62513
config = { pkgs }: let lib = pkgs.lib; in {
allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
"megacli"
];
};
# Expose `nixos/pkgs` as an attribute of our package set.
# This is primarily consumed by `nixos/modules/packages.nix`, which
# then exposes it as a module argument. We do this here so that
# the package set only needs to be evaluated once for the grid, rather
# than once for each host. A short usage sketch follows the overlay below.
overlays = [
(self: super: { ourpkgs = self.callPackage ../../nixos/pkgs {}; })
];
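# With this overlay a module built against this package set can refer to the
# local packages as `pkgs.ourpkgs`; a minimal sketch (the package name is
# hypothetical):
#
#   { pkgs, ... }: {
#     environment.systemPackages = [ pkgs.ourpkgs.somePackage ];
#   }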
};
}
{ config, lib, modulesPath, ... }:
{
imports = [
# modulesPath points at the upstream nixos/modules directory.
"${modulesPath}/virtualisation/vagrant-guest.nix"
];
options.grid = {
publicIPv4 = lib.mkOption {
type = lib.types.str;
description = ''
The primary IPv4 address of the virtual machine.
'';
};
};
config = {
services.qemuGuest.enable = true;
boot.loader.grub.device = "/dev/vda";
boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "virtio_blk" "sd_mod" "sr_mod" ];
boot.kernel.sysctl = { "vm.swappiness" = 0; };
boot.kernelParams = [ "console=tty0" "console=ttyS0,115200" ];
# remove the fsck that runs at startup. It will always fail to run, stopping
# your boot until you press *.
boot.initrd.checkJournalingFS = false;
networking.interfaces.ens5.ipv4.addresses = [{
address = config.grid.publicIPv4;
prefixLength = 24;
}];
# The issuer configuration wants to read the location of its database
# directory from the filesystem configuration. Since the Vagrant
# environment doesn't have separate volume-as-infrastructure management
# (maybe it could? but why bother?) we do a bind-mount here so there is a
# configured value readable. The database won't really have a dedicated
# volume but it will sort of appear as if it does.
services.private-storage-issuer.databaseFileSystem = {
device = "/var/lib/origin-zkapissuer-v2";
options = ["bind"];
};
# XXX This should be handled by the storage module like the zkap
# filesystem above is handled by the issuer module.
fileSystems."/storage" = { fsType = "tmpfs"; };
fileSystems."/" =
{ device = "/dev/vda1";
fsType = "ext4";
};
# We want to push packages with morph without having to sign them
nix.settings.trusted-users = [ "@wheel" "root" "vagrant" ];
};
}
{ publicIPv4, ... }:
{
imports = [ ./vagrant-guest.nix ];
virtualisation.virtualbox.guest.enable = true;
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/sda";
boot.initrd.availableKernelModules = [ "ata_piix" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernel.sysctl = { "vm.swappiness" = 0; };
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
# remove the fsck that runs at startup. It will always fail to run, stopping
# your boot until you press *.
boot.initrd.checkJournalingFS = false;
networking.interfaces.enp0s8.ipv4.addresses = [{
address = publicIPv4;
prefixLength = 24;
}];
fileSystems."/storage" = { fsType = "tmpfs"; };
fileSystems."/" =
{ device = "/dev/sda1";
fsType = "ext4";
};
swapDevices = [ ];
# We want to push packages with morph without having to sign them
nix.trustedUsers = [ "@wheel" "root" "vagrant" ];
}
@@ -6,22 +6,48 @@
boot.loader.grub.device = lib.mkForce "/dev/nvme0n1";
ec2.hvm = true;
boot.kernel.sysctl = { "vm.swappiness" = 0; };
boot.kernel.sysctl = { "vm.swappiness" = 1; };
swapDevices = [ {
device = "/var/swapfile";
size = 4096; # megabytes
size = 1024; # megabytes
randomEncryption = true;
} ];
# If we don't manually and explicitly early-load the loop module, crypt-swap
# setup fails with the not very helpful message: "loop device with autoclear
# flag is required"
# See https://unix.stackexchange.com/a/554500/81275
boot.kernelModules = [ "loop" ];
# NixOS likes to fill up boot partitions with (by default) 100 old kernels.
# Keep a (for us) more reasonable number around.
boot.loader.grub.configurationLimit = 8;
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
# Most of the time, we have ample free & usable memory, but when upgrading
# software, we sometimes run out because of Nix. This is supposed to help:
zramSwap.enable = true;
}
{ modulesPath, name, lib, ... }: {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.loader.grub.device = "/dev/sda";
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" ];
boot.initrd.kernelModules = [ "nvme" ];
boot.kernel.sysctl = { "vm.swappiness" = 1; };
fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; };
swapDevices = [ {
device = "/var/swapfile";
size = 4096; # megabytes
randomEncryption = true;
} ];
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
}
{ modulesPath, name, lib, ... }: {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.loader.grub.device = "/dev/sda";
boot.kernel.sysctl = { "vm.swappiness" = 1; };
fileSystems."/" = { device = "/dev/sda3"; fsType = "ext4"; };
swapDevices = [ {
device = "/dev/sda2";
randomEncryption = true;
} ];
# Break the tie between AWS and morph for the hostname by forcing the
# morph-supplied name. See also
# <https://github.com/DBCDK/morph/issues/146>.
networking.hostName = name;
# Mount a dedicated filesystem (ideally on a dedicated volume, but that's
# beyond control of this particular part of the system) for the
# PaymentServer voucher database. This makes it easier to manage for
# tasks like backup/recovery and encryption.
services.private-storage-issuer.databaseFileSystem = {
label = "zkapissuer-data";
};
# Clean up packages after a while
nix.gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Turn on automatic optimization of nix store
# https://nixos.wiki/wiki/Storage_optimization
nix.settings.auto-optimise-store = true;
}
# This, along with `customize-issuer.nix`, contains all of the NixOS system
# configuration necessary to specify an "issuer"-type system. Originally, this
# file had all the static configuration, and `customize-issuer.nix` was a function
# that filled in the holes. We are in the process of merging the modules, using settings
# instead of function arguments.
# See https://whetstone.privatestorage.io/privatestorage/PrivateStorageio/-/issues/80
{ config, ...}:
# This contains all of the NixOS system configuration necessary to specify an
# "issuer"-type system.
{ lib, config, ...}:
let
inherit (config.grid) publicKeyPath privateKeyPath;
inherit (config.grid) privateKeyPath;
inherit (config.grid.issuer) issuerDomains allowedChargeOrigins tokensPerVoucher;
in {
deployment = {
secrets = {
"ristretto-signing-key" = {
destination = "/run/keys/ristretto.signing-key";
source = "${privateKeyPath}/ristretto.signing-key";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
"stripe-secret-key" = {
destination = "/run/keys/stripe.secret-key";
source = "${privateKeyPath}/stripe.secret";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
imports = [
./monitoringvpn-client.nix
];
"monitoringvpn-secret-key" = {
destination = "/run/keys/monitoringvpn/client.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
options.grid.issuer = {
issuerDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the domain names that point at this issuer
system. These will all be included in the Let's Encrypt certificate.
'';
};
allowedChargeOrigins = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the CORS Origins the issuer will be configured
to allow.
'';
};
tokensPerVoucher = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.int;
example = 50000;
description = ''
If not null, a value to pass to PaymentServer for
``--tokens-per-voucher``.
'';
};
};
imports = [
../../nixos/modules/issuer.nix
../../nixos/modules/monitoring/vpn/client.nix
../../nixos/modules/monitoring/exporters/node.nix
];
config = {
deployment = {
secrets = {
"ristretto-signing-key" = {
destination = "/run/keys/ristretto.signing-key";
source = "${privateKeyPath}/ristretto.signing-key";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
"stripe-secret-key" = {
destination = "/run/keys/stripe.secret-key";
source = "${privateKeyPath}/stripe.secret";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
"stripe-webhook-secret-key" = {
destination = "/run/keys/stripe.webhook-secret-key";
source = "${privateKeyPath}/stripe.webhook-secret";
owner.user = "zkapissuer";
owner.group = "zkapissuer";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "zkapissuer.service"];
};
};
};
services.private-storage-issuer = {
enable = true;
tls = true;
ristrettoSigningKeyPath = config.deployment.secrets.ristretto-signing-key.destination;
stripeSecretKeyPath = config.deployment.secrets.stripe-secret-key.destination;
stripeWebhookSecretKeyPath = config.deployment.secrets.stripe-webhook-secret-key.destination;
database = "SQLite3";
databasePath = "${config.fileSystems."zkapissuer-data".mountPoint}/vouchers.sqlite3";
inherit (config.grid) letsEncryptAdminEmail;
inherit allowedChargeOrigins;
domains = issuerDomains;
inherit tokensPerVoucher;
};
services.private-storage.monitoring.exporters.node.enable = true;
services.private-storage-issuer = {
enable = true;
tls = true;
ristrettoSigningKeyPath = config.deployment.secrets.ristretto-signing-key.destination;
stripeSecretKeyPath = config.deployment.secrets.stripe-secret-key.destination;
database = "SQLite3";
databasePath = "/var/lib/zkapissuer/vouchers.sqlite3";
system.stateVersion = "19.03";
};
}
# Similar to ``issuer.nix`` but for a "monitoring"-type system. Holes are
# filled by ``customize-monitoring.nix``.
{
deployment = {
secrets = {
"monitoringvpn-private-key" = {
# This contains all of the NixOS system configuration necessary to specify a
# "monitoring"-type system.
{ lib, config, nodes, ...}:
let
cfg = config.grid.monitoring;
inherit (config.grid) publicKeyPath privateKeyPath monitoringvpnIPv4 letsEncryptAdminEmail;
# This collects information about monitored hosts from their configuration for use below.
monitoringHosts = lib.mapAttrsToList (name: node: rec {
inherit name;
vpnIPv4 = node.config.grid.monitoringvpnIPv4;
vpnHostName = "${name}.monitoringvpn";
hostNames = [name vpnHostName];
}) nodes;
# A set mapping VPN IP addresses as strings to lists of hostnames as
# strings. The system's ``/etc/hosts`` will be populated with this
# information. Apart from helping with normal forward resolution, this
# *also* gives us reverse resolution from the VPN IPs to hostnames which
# allows Grafana to show us hostnames instead of VPN IP addresses.
hostsMap = lib.listToAttrs (map (node: lib.nameValuePair node.vpnIPv4 node.hostNames) monitoringHosts);
# A list of VPN IP addresses as strings indicating which clients will be
# allowed onto the VPN.
vpnClientIPs = lib.remove monitoringvpnIPv4 (map (node: node.vpnIPv4) monitoringHosts);
# A list of VPN clients (IP addresses or hostnames) as strings indicating
# which nodes to scrape "nodeExporter" metrics from.
nodeExporterTargets = map (node: node.vpnHostName) monitoringHosts;
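# A sketch of what these evaluate to for the testing grid defined above
# (actual values depend on the nodes in the morph network):
#
#   hostsMap            = { "172.23.23.1" = [ "monitoring" "monitoring.monitoringvpn" ]; ... };
#   vpnClientIPs        = [ "172.23.23.11" "172.23.23.12" ];
#   nodeExporterTargets = [ "monitoring.monitoringvpn" "payments.monitoringvpn" "storage001.monitoringvpn" ];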
in {
imports = [
../../nixos/modules/monitoring/vpn/server.nix
../../nixos/modules/monitoring/server/grafana.nix
../../nixos/modules/monitoring/server/prometheus.nix
../../nixos/modules/monitoring/server/loki.nix
../../nixos/modules/monitoring/exporters/blackbox.nix
];
options.grid.monitoring = {
paymentExporterTargets = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of VPN clients (IP addresses or hostnames) as strings indicating
which nodes to scrape PaymentServer metrics from.
'';
};
blackboxExporterHttpsTargets = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of HTTPS servers (URLs, IP addresses or hostnames) as strings indicating
which nodes the BlackboxExporter should scrape HTTP and TLS metrics from.
'';
};
monitoringDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
A list of strings giving the domain names that point at this monitoring
system. These will all be included in the Let's Encrypt certificate.
'';
};
googleOAuthClientID = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
A string containing the GSuite OAuth2 ClientID to use to authenticate
logins to Grafana.
'';
};
enableSlackAlert = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to enable alerting via Slack.
When true, a grafana-slack-url file is required (see private-keys/README.rst).
'';
};
enableZulipAlert = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to enable alerting via Zulip.
When true, a grafana-zulip-url file is required (see private-keys/README.rst).
'';
};
};
config = {
assertions = [
{
assertion = let
vpnIPs = (map (node: node.vpnIPv4) monitoringHosts);
in vpnIPs == lib.unique vpnIPs;
message = ''
Duplicate grid.monitoringvpnIPv4 values specified for different nodes.
'';
}
];
deployment.secrets = lib.mkMerge [
{
"monitoringvpn-private-key" = {
destination = "/run/keys/monitoringvpn/server.key";
source = "${privateKeyPath}/monitoringvpn/server.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"monitoringvpn-preshared-key" = {
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
source = "${privateKeyPath}/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
};
"grafana-admin-password" = {
source = "${privateKeyPath}/grafana-admin.password";
destination = "/run/keys/grafana-admin.password";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
}
(lib.mkIf (cfg.googleOAuthClientID != "") {
"grafana-google-sso-secret" = {
source = "${privateKeyPath}/grafana-google-sso.secret";
destination = "/run/keys/grafana-google-sso.secret";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
(lib.mkIf cfg.enableSlackAlert {
"grafana-slack-url" = {
source = "${privateKeyPath}/grafana-slack-url";
destination = "/run/keys/grafana-slack-url";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
(lib.mkIf cfg.enableZulipAlert {
"grafana-zulip-url" = {
source = "${privateKeyPath}/grafana-zulip-url";
destination = "/run/keys/grafana-zulip-url";
owner.user = config.systemd.services.grafana.serviceConfig.User;
owner.group = config.users.users.grafana.group;
permissions = "0400";
action = ["sudo" "systemctl" "restart" "grafana.service"];
};
})
];
networking.hosts = hostsMap;
services.private-storage.monitoring.vpn.server = {
enable = true;
ip = monitoringvpnIPv4;
inherit vpnClientIPs;
pubKeysPath = "${publicKeyPath}/monitoringvpn";
};
};
imports = [
../../nixos/modules/monitoring/vpn/server.nix
../../nixos/modules/monitoring/server/grafana.nix
../../nixos/modules/monitoring/server/prometheus.nix
../../nixos/modules/monitoring/exporters/node.nix
# Loki 0.3.0 from Nixpkgs 19.09 is too old and does not work:
# ../../nixos/modules/monitoring/server/loki.nix
];
services.private-storage.monitoring.prometheus = {
inherit nodeExporterTargets;
inherit (cfg) paymentExporterTargets blackboxExporterHttpsTargets;
nginxExporterTargets = [];
};
services.private-storage.monitoring.grafana = {
inherit (cfg) googleOAuthClientID enableSlackAlert enableZulipAlert;
inherit letsEncryptAdminEmail;
domains = cfg.monitoringDomains;
};
services.private-storage.monitoring.exporters.node.enable = true;
};
}
{ lib, config, ...}:
let
inherit (config.grid) publicKeyPath privateKeyPath monitoringvpnEndpoint monitoringvpnIPv4;
in {
config = {
deployment = {
secrets = {
"monitoringvpn-secret-key" = {
destination = "/run/keys/monitoringvpn/client.key";
source = "${privateKeyPath}/monitoringvpn/${monitoringvpnIPv4}.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
"monitoringvpn-preshared-key" = {
destination = "/run/keys/monitoringvpn/preshared.key";
source = "${privateKeyPath}/monitoringvpn/preshared.key";
owner.user = "root";
owner.group = "root";
permissions = "0400";
action = ["sudo" "systemctl" "restart" "wireguard-monitoringvpn.service"];
};
};
};
services.private-storage.monitoring.vpn.client = {
enable = true;
ip = monitoringvpnIPv4;
endpoint = monitoringvpnEndpoint;
endpointPublicKeyFile = "${publicKeyPath}/monitoringvpn/server.pub";
};
};
}