Compare revisions: changes are shown as if the source revision was being merged into the target revision.
Showing 303 additions and 71 deletions.
An7g9oexXQizNu6PTNWuLHDprwd5GydHHd2fuImvhGs=
mVXVGBpS/rHp5qQG8izNdP/Tpj5TXO9CA4CGJ5c0cXk=
aKsdXaE+1YINE71pX2BLiaIrxeSXbr/F/lHo/gDSxG4=
GCnw0k/Y4HDkRCSpZ/hrpMIGQt6LViS7ub25cpbHm3Q=
8xMB69/yQDyjfXbPWn3VWqXKqRT/yCZ/RGjy1hLBE2Y=
fPUnFOzBZRJDBdSR6iS5AaC40KKy/2REiM16hx+woxk=
qS4rT+zjWrbXDhtEF4oyGv8/5oCIE1ZU9FF+O6AL8V4=
An7g9oexXQizNu6PTNWuLHDprwd5GydHHd2fuImvhGs=
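# Authorized SSH keys per user; "root" accepts any of the administrators' keys.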
let
  bdonneaux = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIZtWY7t8HVnaz6bluYsrAlzZC3MZtb8g0nO5L5fCQKR cardno:000619776016"
  ];
  flo = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6EUU/KNDr7y3m5OVWBZAuPiMJ4us3YOBEhxpG29yPN flo@la"
  ];
  lastresort = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE1hy9mPkJI+7mY2Uq6CLpuFMMLOTfiY2sRJHwpihgRt cardno:26 269 859"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJPYMUVNuWr2y+FL1GxW6S6jb3BWYhbzJ2zhvQVKu2ll cardno:23 845 763"
  ];
in {
  "root" = bdonneaux ++ flo ++ lastresort;
  inherit bdonneaux;
  inherit flo;
  inherit lastresort;
}
#100tb
{ "hostId" = "00000001";
  "interface" = "eno1";
  "publicIPv4" = "185.225.209.174";
  "prefixLength" = 24;
  "gateway" = "185.225.209.1";
  "gatewayInterface" = "eno1";
}
# NixOS configuration specific to this node
{ config, lib, pkgs, ... }:
{
  imports =
    [ <nixpkgs/nixos/modules/installer/scan/not-detected.nix>
    ];

  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];
  boot.kernel.sysctl = { "vm.swappiness" = 1; };

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/d0837e6f-72cb-4ffa-85ba-fd57bbbd9a97";
      fsType = "ext4";
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/915E-08F9";
      fsType = "vfat";
    };

  fileSystems."/storage" =
    { device = "root";
      fsType = "zfs";
    };

  swapDevices = [ {
    device = "/dev/disk/by-partuuid/9f8f4ed8-9c26-45ff-ba1b-648a3babc050";
    randomEncryption = true;
  } ];

  nix.settings.max-jobs = lib.mkDefault 24;
  powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
}
#Servermania
{ "hostId" = "00000002";
  "interface" = "eno1";
  "publicIPv4" = "38.170.241.34";
  "prefixLength" = 29;
  "gateway" = "38.170.241.33";
  "gatewayInterface" = "eno1";
}
# NixOS configuration specific to this node
{ config, lib, pkgs, ... }:
{
  imports =
    [ <nixpkgs/nixos/modules/installer/scan/not-detected.nix>
    ];

  boot.initrd.availableKernelModules = [ "ahci" "xhci_pci" "ehci_pci" "megaraid_sas" "usbhid" "usb_storage" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];
  boot.kernel.sysctl = { "vm.swappiness" = 1; };

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/c8d29070-c5d3-4cfb-9bb2-c14d3727c45b";
      fsType = "ext4";
    };

  # Manually created using:
  # zpool create -m legacy -o ashift=12 root raidz /dev/disk/by-id/{wwn-0x5000cca25dcc966d,wwn-0x5000cca24cec02e3,wwn-0x5000cca25dcc7711,wwn-0x5000cca25dccca63,wwn-0x5000cca25dcc74b6,wwn-0x5000cca25dcc4591,wwn-0x5000cca25dcc4461}
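  # After creation, `zpool status root` should show all seven disks in a single raidz vdev.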
  fileSystems."/storage" =
    { device = "root";
      fsType = "zfs";
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/3648-C8F5";
      fsType = "vfat";
    };

  swapDevices = [ {
    device = "/dev/disk/by-partuuid/e0e81098-3b66-4486-b138-b320645bc698";
    randomEncryption = true;
  } ];

  nix.settings.max-jobs = lib.mkDefault 24;
  powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
}
#OVH
{ "hostId" = "00000001";
  "interface" = "eno3";
  "publicIPv4" = "151.80.28.108";
  "prefixLength" = 24;
  "gateway" = "151.80.28.254";
  "gatewayInterface" = "eno3";
}
# NixOS configuration specific to this node
{ config, lib, pkgs, modulesPath, ... }:
{
  imports =
    [ (modulesPath + "/installer/scan/not-detected.nix")
    ];

  boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];
  boot.kernel.sysctl = { "vm.swappiness" = 1; };

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/8f88c0f1-3aef-41ec-bfd7-55b4ba6c1341";
      fsType = "ext4";
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/23DC-4051";
      fsType = "vfat";
    };

  fileSystems."/storage" =
    { device = "root";
      fsType = "zfs";
    };

  swapDevices = [ {
    device = "/dev/disk/by-partuuid/f56b5104-e4a7-443e-91c8-4555eb27899e";
    randomEncryption = true;
  } ];

  hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
  powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
}
@@ -8,14 +8,18 @@ Issues with networking that looked like guest misconfigurations vanished after c
This requires `NixOS <https://nixos.org/>`_.
Nix without the OS will not work.
Use the local development environment
`````````````````````````````````````
0. Add to your NixOS system configuration at ``/etc/nixos/configuration.nix`` (and rebuild)::

     virtualisation.virtualbox.host.enable = true;
     # Save bytes and build time, optional but recommended:
     virtualisation.virtualbox.host.headless = true;
     # Enable libvirt - likely incompatible with virtualisation.virtualbox!
     virtualisation.libvirtd.enable = true;
     # Required for LibVirt
     security.polkit.enable = true;
     # Enable HW acceleration if (nested virtualisation is) available
     #boot.kernelModules = [ "kvm-amd" "kvm-intel" ];
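
   To activate the configuration, rebuild and switch into it (the standard NixOS procedure)::

     sudo nixos-rebuild switch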
1. Enter the morph local grid directory::
@@ -27,19 +31,27 @@ Use the local development environment
3. Build and start the VMs::

     vagrant up --provider=libvirt

   Optionally, to switch from QEMU to KVM virtualization, edit the virtual machine definition of each machine and replace the "qemu" on the first line with "kvm"::

     sudo virsh list
     sudo virsh edit <machine id> (once for every machine)
     vagrant halt
     vagrant up
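
   The line in question is the ``<domain>`` element at the top of libvirt's XML definition; the edit amounts to (a sketch, other attributes omitted)::

     <domain type='qemu'>   becomes   <domain type='kvm'>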
4. Then, add the Vagrant SSH configuration to your user's ``~/.ssh/config`` file::

     install -d ~/.ssh ; vagrant ssh-config >> ~/.ssh/config
   Latest Morph honors the ``SSH_CONFIG_FILE`` environment variable (`since 3f90aa88 (March 2020, v 1.5.0) <https://github.com/DBCDK/morph/commit/3f90aa885fac1c29fce9242452fa7c0c505744ef#diff-d155ad793bd62e6ea4c44ba985049ecb13a4f4f32f799791b2bce695a16c0101>`_), so in the future this should get a bit more convenient.
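
   With a new enough Morph you could therefore skip editing ``~/.ssh/config`` and do something like this instead (a sketch; the file name is arbitrary)::

     vagrant ssh-config > vagrant-ssh-config
     SSH_CONFIG_FILE=vagrant-ssh-config morph push grid.nix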
5. Create a ``public-keys/users.nix`` file with your SSH key (see ``public-keys/users.nix.example`` for the format) so you'll be able to log in after deploying the new configuration::

     $EDITOR public-keys/users.nix
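
   A minimal file in that format could look like this (user name and key are placeholders)::

     let
       yourname = [
         "ssh-ed25519 AAAA... yourname@yourhost"
       ];
     in {
       "root" = yourname;
       inherit yourname;
     }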
6. Then, build and deploy our software to the Vagrant VMs::

     morph build grid.nix
     morph push grid.nix
@@ -48,4 +60,4 @@ Use the local development environment
     vagrant up
     morph upload-secrets grid.nix
You should now be able to log in with the users and keys you set in your ``users.nix`` file.
# -*- mode: ruby -*-
# vi: set ft=ruby :
# This Vagrantfile worked for Florian Sesser using Vagrant 2.2.19 and
# the LibVirt/QEMU hypervisor. Earlier Vagrant and VirtualBox worked too.
# Get a dedicated LibVirt pool name or use the default one
pool_name = ENV.has_key?('POOL_NAME') ? ENV['POOL_NAME'] : 'default'
# For instance, one could create such a pool beforehand as follows:
# export POOL_NAME=morph_local_$(id -un)
# POOL_PATH="/path/to/your/storage"
# mkdir -p "${POOL_PATH}"
# sudo virsh pool-define-as ${POOL_NAME} --type dir --target "${POOL_PATH}"
# sudo virsh pool-autostart ${POOL_NAME}
# sudo virsh pool-start ${POOL_NAME}
Vagrant.configure("2") do |config|
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Select the base image
  config.vm.box = "esselius/nixos"
  config.vm.box_version = "20.09"
  config.vm.box_check_update = false
  # No need to sync the working dir with the guest boxes;
  # better use SFTP to transfer files.
  config.vm.synced_folder ".", "/vagrant", disabled: true
  # Tune LibVirt/QEMU guests
  config.vm.provider :libvirt do |domain|
    # The default of one CPU should work.
    # Increase to speed up boot/push/deploy:
    # domain.cpus = 1

    # To use the self-updating deployment system you need more memory. Giving
    # all of the VMs enough memory for this is rather taxing, though, and the
    # self-updating deployment system is not particularly useful for local
    # dev. But should you want to:
    #
    # domain.memory = 4096
    #
    # Meanwhile, 1024 was apparently the default with VirtualBox.
    domain.memory = 1024

    # Using a specific pool may help to manage the disk space
    domain.storage_pool_name = pool_name
    domain.snapshot_pool_name = pool_name

    # No need for graphics - better use the serial console
    domain.graphics_type = "none"
    domain.video_type = "none"
  end
  config.vm.define "payments.localdev" do |config|
    config.vm.hostname = "payments"
    # Assign a static IP address inside the box host-only (Vagrant
    # calls it "private") network. The address must be in the range
    # VirtualBox allows.
    # https://www.virtualbox.org/manual/ch06.html#network_hostonly says some
    # things about this.
    config.vm.network "private_network", ip: "192.168.56.21"
    # Add self-signed SSL key for the zkap-issuer:
    config.vm.provision "file", source: "private-keys/payments-localdev-ssl", destination: "/tmp/payments-localdev-ssl"
    config.vm.provision "shell", inline: "sudo mkdir -p /var/lib/letsencrypt/live/payments.localdev/"
@@ -32,35 +69,30 @@ Vagrant.configure("2") do |config|
  config.vm.define "storage1.localdev" do |config|
    config.vm.hostname = "storage1"
    config.vm.network "private_network", ip: "192.168.56.22"
  end

  config.vm.define "storage2.localdev" do |config|
    config.vm.hostname = "storage2"
    config.vm.network "private_network", ip: "192.168.56.23"
  end

  config.vm.define "monitoring.localdev" do |config|
    config.vm.hostname = "monitoring"
    config.vm.network "private_network", ip: "192.168.56.24"
  end
  # To make the VMs assign the static IPs to the network interfaces we need a rebuild:
  ## Rename to 'nix.settings.trusted-users' after 20.09 or so:
  config.vm.provision "shell",
    inline: "echo '{ nix.trustedUsers = [ \"@wheel\" \"root\" \"vagrant\" ]; boot.kernelParams = [ \"console=tty0\" \"console=ttyS0,115200\" ]; }' > /etc/nixos/custom-configuration.nix"
  config.vm.provision "shell", inline: "nixos-rebuild switch"
  config.vm.provision "shell", inline: "systemctl stop firewall.service"
  config.vm.provision "shell", inline: "systemctl start serial-getty@ttyS0.service"
  config.trigger.after :up do |trigger|
    trigger.info = "Hostname and IP address this host actually uses:"
    trigger.run_remote = {inline: "echo `hostname` `ifconfig | egrep -o '192.168.56.[0-9]* '`"}
  end
end
@@ -2,9 +2,10 @@
, "publicStoragePort": 8898
, "publicKeyPath": "./public-keys"
, "privateKeyPath": "./private-keys"
, "monitoringvpnEndpoint": "192.168.56.24:51820"
, "passValue": 1000000
, "issuerDomains": ["payments.localdev"]
, "monitoringDomains": ["monitoring.localdev"]
, "letsEncryptAdminEmail": "florian@privatestorage.io"
, "allowedChargeOrigins": [
    "http://localhost:5000"
let
  pkgs = import <nixpkgs> { };
  gridlib = import ../../lib;
  grid-config = builtins.fromJSON (builtins.readFile ./config.json);
  ssh-users = let
    ssh-users-file = ./public-keys/users.nix;
@@ -27,6 +25,8 @@ let
      ../../../nixos/modules/deployment.nix
      # Give it a good SSH configuration.
      ../../../nixos/modules/ssh.nix
      # Configure things specific to the virtualisation environment.
      gridlib.hardware-vagrant
    ];

    services.private-storage.sshUsers = ssh-users;
@@ -46,7 +46,7 @@ let
    # depend on the format we use.
    mode = "0666";
    text = ''
      # Include the ssh-users config
      builtins.fromJSON (builtins.readFile ./ssh-users.json)
    '';
  };
@@ -57,6 +57,7 @@ let
    grid = {
      publicKeyPath = toString ./. + "/${grid-config.publicKeyPath}";
      privateKeyPath = toString ./. + "/${grid-config.privateKeyPath}";
      inherit (grid-config) monitoringvpnEndpoint letsEncryptAdminEmail;
    };

    # Configure deployment management authorization for all systems in the grid.
    services.private-storage.deployment = {
@@ -68,67 +69,76 @@ let
  payments = {
    imports = [
      gridlib.issuer
      grid-module
    ];
    config = {
      grid.monitoringvpnIPv4 = "172.23.23.11";
      grid.publicIPv4 = "192.168.56.21";
      grid.issuer = {
        inherit (grid-config) issuerDomains allowedChargeOrigins;
      };
    };
  };
  storage1 = {
    imports = [
      gridlib.storage
      grid-module
    ];
    config = {
      grid.monitoringvpnIPv4 = "172.23.23.12";
      grid.publicIPv4 = "192.168.56.22";
      grid.storage = {
        inherit (grid-config) passValue publicStoragePort;
      };
      system.stateVersion = "19.09";
    };
  };
  storage2 = {
    imports = [
      gridlib.storage
      grid-module
    ];
    config = {
      grid.monitoringvpnIPv4 = "172.23.23.13";
      grid.publicIPv4 = "192.168.56.23";
      grid.storage = {
        inherit (grid-config) passValue publicStoragePort;
      };
      system.stateVersion = "19.09";
    };
  };
  monitoring = {
    imports = [
      gridlib.monitoring
      grid-module
    ];
    config = {
      grid.monitoringvpnIPv4 = "172.23.23.1";
      grid.publicIPv4 = "192.168.56.24";
      grid.monitoring = {
        inherit paymentExporterTargets blackboxExporterHttpsTargets;
        inherit (grid-config) monitoringDomains;
        googleOAuthClientID = grid-config.monitoringGoogleOAuthClientID;
        enableZulipAlert = false;
      };
      system.stateVersion = "19.09";
    };
  };
  # TBD: derive these automatically:
  hostsMap = {
    "172.23.23.1" = [ "monitoring" "monitoring.monitoringvpn" ];
    "172.23.23.11" = [ "payments" "payments.monitoringvpn" ];
    "172.23.23.12" = [ "storage1" "storage1.monitoringvpn" ];
    "172.23.23.13" = [ "storage2" "storage2.monitoringvpn" ];
  };
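  # One way these could be derived automatically (a sketch; `vpnHosts` is a
  # hypothetical attrset of host name -> VPN IP):
  #   vpnHosts = { monitoring = "172.23.23.1"; payments = "172.23.23.11"; ... };
  #   hostsMap = pkgs.lib.mapAttrs'
  #     (name: ip: pkgs.lib.nameValuePair ip [ name "${name}.monitoringvpn" ])
  #     vpnHosts;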
  vpnClientIPs = [ "172.23.23.11" "172.23.23.12" "172.23.23.13" ];
  nodeExporterTargets = [ "monitoring" "payments" "storage1" "storage2" ];
  paymentExporterTargets = [ "payments.monitoringvpn" ];
  blackboxExporterHttpsTargets = [
    # "https://private.storage/"
    # "https://payments.private.storage/"
  ];
in {
  network = {
    description = "PrivateStorage.io LocalDev Grid";
    inherit (gridlib) pkgs;
  };
  inherit payments monitoring storage1 storage2;
}
@@ -11,7 +11,8 @@ You can find more information about some of these secrets in ``ops/generating-ke
deploy_key
----------

This SSH private key authenticates an account used by the continuous deployment system.
Each node authorizes that account to trigger a deployment update on itself.
The corresponding SSH public key is kept in the ``public-keys`` location.
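
A key pair of the right shape can be generated with, for example (file name and comment are illustrative)::

    ssh-keygen -t ed25519 -N '' -C deploy -f deploy_key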
@@ -19,6 +20,20 @@

grafana-admin.password
----------------------

This is the initial admin password for the Grafana web admin on the monitoring host.
grafana-slack-url
-----------------

Grafana's systemd service reads this file to set an environment variable holding the secret Slack WebHook URL that alerts are posted to.
The only line in the file should be the secret URL.
Use the URL from `this 1Password entry <https://privatestorage.1password.com/vaults/7flqasy5hhhmlbtp5qozd3j4ga/allitems/cgznskz2oix2tyx5xyntwaos5i>`_ or get a new secret URL for your Slack channel at https://www.slack.com/apps/A0F7XDUAZ.
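
Slack WebHook URLs follow this general shape (placeholder values, not a real hook)::

    https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX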
grafana-zulip-url
-----------------

This file should contain a single line with the secret Zulip alerting WebHook Bot URL.
The URLs for Staging and Production are both stored in 1Password.
See https://zulip.com/integrations/doc/grafana for documentation and ``grid/local/private-keys/grafana-zulip-url`` for an example.
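
Such URLs typically take this shape (placeholder values)::

    https://yourZulipDomain.zulipchat.com/api/v1/external/grafana?api_key=abcdefgh&stream=alerts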
stripe.secret
-------------