Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 1763 additions and 1090 deletions
......@@ -7,18 +7,9 @@
let
cfg = config.services.private-storage.monitoring.grafana;
grafanaAuth = if (cfg.googleOAuthClientID == "") then {
anonymous.enable = true;
} else {
google.enable = true;
# Grafana considers it "sign up" to let in a user it has
# never seen before.
google.allowSignUp = true;
google.clientSecretFile = cfg.googleOAuthClientSecretFile;
google.clientId = cfg.googleOAuthClientID;
};
in {
options.services.private-storage.monitoring.grafana = {
domains = lib.mkOption
{ type = lib.types.listOf lib.types.str;
......@@ -77,6 +68,21 @@ in {
Where to find the file that contains the Slack URL.
'';
};
enableZulipAlert = lib.mkOption
{ type = lib.types.bool;
default = false;
description = ''
Enables the Zulip alerter. Expects a file that contains
the secret Zulip webhook URL in grafanaZulipUrlFile (see below).
'';
};
grafanaZulipUrlFile = lib.mkOption
{ type = lib.types.path;
default = /run/keys/grafana-zulip-url;
description = ''
Where to find the file that contains the Zulip URL.
'';
};
};
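# For reference, a deployment enabling the new Zulip alerter would set
# something like the following (a sketch; the attribute path assumes this
# module is mounted at services.private-storage.monitoring.grafana, and
# the key path shown is just this option's default):
#
#   services.private-storage.monitoring.grafana = {
#     enableZulipAlert = true;
#     # Must contain the secret Zulip webhook URL and nothing else.
#     grafanaZulipUrlFile = /run/keys/grafana-zulip-url;
#   };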
config =
......@@ -90,59 +96,80 @@ in {
services.grafana = {
enable = true;
inherit domain;
port = 2342;
addr = "127.0.0.1";
# No phoning home
analytics.reporting.enable = false;
settings = {
server = {
domain = "${toString domain}";
http_port = 2342;
http_addr = "127.0.0.1";
# Defend against DNS rebinding attacks.
enforce_domain = true;
# Force Grafana to believe it is reachable via https on the default port
# number because that's where the nginx that forwards traffic to it is
# listening. Grafana's own server listens on an internal address that
# doesn't matter to anyone except our nginx instance.
rootUrl = "https://%(domain)s/";
root_url = "https://%(domain)s/";
};
# No phoning home
analytics.reporting_enabled = false;
extraOptions = {
# Defend against DNS rebinding attacks.
SERVER_ENFORCE_DOMAIN = "true";
# Same time zone for all users by default
DATE_FORMATS_DEFAULT_TIMEZONE = "UTC";
};
date_formats.default_timezone = "UTC";
auth = {
anonymous.org_role = "Admin";
anonymous.org_name = "Main Org.";
} // grafanaAuth;
# Since NixOS 22.11 the auth section names are a bit funky, with a dot in the name
#
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/grafana/#anonymous-authentication
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/google/
"auth.anonymous" = lib.mkIf (cfg.googleOAuthClientID == "") {
enabled = true;
org_role = "Admin";
org_name = "Main Org.";
};
"auth.google" = lib.mkIf (cfg.googleOAuthClientID != "") {
enabled = true;
# Grafana considers it "sign up" to let in a user it has
# never seen before.
allow_sign_up = true;
client_secret = "$__file{${toString cfg.googleOAuthClientSecretFile}}";
client_id = cfg.googleOAuthClientID;
};
# Give users that come through GSuite SSO the highest possible privileges:
users.autoAssignOrgRole = "Editor";
users.auto_assign_org_role = "Editor";
# Read the admin password from a file in our secrets folder:
security.adminPasswordFile = cfg.adminPasswordFile;
security.admin_password = "$__file{${toString cfg.adminPasswordFile}}";
};
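# Roughly, and with googleOAuthClientID left empty, the settings above
# render into a grafana.ini along these lines (a sketch rather than
# verbatim output; the domain is a placeholder):
#
#   [server]
#   domain = grafana.example.invalid
#   http_port = 2342
#   http_addr = 127.0.0.1
#   enforce_domain = true
#   root_url = https://%(domain)s/
#
#   [auth.anonymous]
#   enabled = true
#   org_role = Admin
#   org_name = Main Org.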
provision = {
enable = true;
# See https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
datasources = [{
datasources.settings.datasources = [{
name = "Prometheus";
type = "prometheus";
uid = "LocalPrometheus";
access = "proxy";
url = cfg.prometheusUrl;
isDefault = true;
} {
name = "Loki";
type = "loki";
uid = "LocalLoki";
access = "proxy";
url = cfg.lokiUrl;
}];
# See https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
dashboards = [{
dashboards.settings.providers = [{
name = "provisioned";
options.path = ./grafana-dashboards;
}];
# See https://grafana.com/docs/grafana/latest/administration/provisioning/#example-alert-notification-channels-config-file
notifiers = [ ] ++ (lib.optionals (cfg.enableSlackAlert) [{
# See https://grafana.com/docs/grafana/latest/alerting/set-up/provision-alerting-resources/file-provisioning/#provision-contact-points
alerting.contactPoints.settings.contactPoints =
[ ] ++ (lib.optionals (cfg.enableSlackAlert) [{
uid = "slack-notifier-1";
name = "Slack";
type = "slack";
......@@ -157,12 +184,22 @@ in {
# See https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider
url = "$__file{${toString cfg.grafanaSlackUrlFile}}";
};
}]) ++ (lib.optionals (cfg.enableZulipAlert) [{
# See https://zulip.com/integrations/doc/grafana
uid = "zulip-notifier-1";
name = "Zulip";
type = "webhook";
is_default = true;
send_reminder = false;
settings = {
url = "$__file{${toString cfg.grafanaZulipUrlFile}}";
};
}]);
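# The file behind grafanaZulipUrlFile is expected to hold a single
# webhook URL. Per the Zulip/Grafana integration docs such URLs look
# roughly like this (organization, api_key, and stream are placeholders):
#
#   https://example.zulipchat.com/api/v1/external/grafana?api_key=abcdefgh&stream=monitoring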
};
};
# nginx reverse proxy
security.acme.email = cfg.letsEncryptAdminEmail;
security.acme.defaults.email = cfg.letsEncryptAdminEmail;
security.acme.acceptTerms = true;
services.nginx = {
enable = true;
......@@ -180,9 +217,20 @@ in {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
proxyPass = "http://127.0.0.1:${toString config.services.grafana.settings.server.http_port}";
proxyWebsockets = true;
};
locations."/metrics" = {
# Only allow our monitoringvpn subnet, and localhost, since we are
# currently also the monitoring server
extraConfig = ''
allow ${config.grid.monitoringvpnIPv4}/24;
allow 127.0.0.1;
allow ::1;
deny all;
'';
proxyPass = "http://127.0.0.1:${toString config.services.grafana.settings.server.http_port}";
};
};
};
......
# Loki Server
#
# Scope: Log aggregator
# Scope: Log ingester and aggregator to be run on the monitoring node
#
# See also:
# - The configuration is adapted from
# https://grafana.com/docs/loki/latest/configuration/examples/#complete-local-configyaml
#
{
config.networking.firewall.allowedTCPPorts = [ 3100 ];
{ config, ...}:
let
logRetention = toString(config.services.private-storage.monitoring.policy.logRetentionSeconds) + "s";
in {
config.networking.firewall.interfaces.monitoringvpn.allowedTCPPorts = [ 3100 ];
config.services.loki = {
enable = true;
......@@ -12,31 +21,39 @@
{
auth_enabled = false;
server = {
http_listen_port = 3100;
};
ingester = {
lifecycler = {
address = "0.0.0.0";
common = {
ring = {
kvstore = {
store = "inmemory";
};
};
instance_addr = "127.0.0.1";
replication_factor = 1;
path_prefix = "/var/lib/loki";
storage = {
filesystem = {
chunks_directory = "/var/lib/loki/chunks";
rules_directory = "/var/lib/loki/rules";
};
};
};
server = {
http_listen_port = 3100;
grpc_listen_port = 9095; # unused, but no option to turn it off.
grpc_listen_address = "127.0.0.1"; # unused, but no option to turn it off.
};
ingester = {
lifecycler = {
final_sleep = "0s";
};
chunk_idle_period = "1h"; # Any chunk not receiving new logs in this time will be flushed
max_chunk_age = "1h"; # All chunks will be flushed when they hit this age, default is 1h
chunk_target_size = 1048576; # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period = "30s"; # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries = 0; # Chunk transfers disabled
chunk_target_size = 1536000; # As per https://grafana.com/docs/loki/v2.2.1/best-practices/
};
schema_config = {
configs = [{
from = "2020-10-24"; # TODO: Should this be "today"?
from = "2020-12-26";
store = "boltdb-shipper";
object_store = "filesystem";
schema = "v11";
......@@ -47,30 +64,19 @@
}];
};
storage_config = {
boltdb_shipper = {
active_index_directory = "/var/lib/loki/boltdb-shipper-active";
cache_location = "/var/lib/loki/boltdb-shipper-cache";
cache_ttl = "24h"; # Can be increased for faster performance over longer query periods, uses more disk space
shared_store = "filesystem";
};
filesystem = {
directory = "/var/lib/loki/chunks";
};
};
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
};
chunk_store_config = {
max_look_back_period = "336h";
allow_structured_metadata = false;
};
table_manager = {
retention_deletes_enabled = true;
retention_period = "336h";
retention_period = logRetention;
};
compactor = {
retention_enabled = true;
delete_request_store = "filesystem";
working_directory = "/var/lib/loki/compactor";
};
};
};
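As a sanity check on the spliced-in logRetention value: assuming the policy
module keeps logRetentionSeconds at 29 days (the figure the privacy-policy
comment elsewhere in this diff promises), the rendered retention period is:

    nix-repl> toString (29 * 24 * 60 * 60) + "s"
    "2505600s"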
......
......@@ -10,9 +10,10 @@ let
cfg = config.services.private-storage.monitoring.prometheus;
dropPortNumber = {
source_labels = [ "__address__" ];
regex = "^(.*):\\d+$";
regex = "^(.*)(?:\\.monitoringvpn):\\d+$";
target_label = "instance";
};
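# For example (hypothetical scrape target): with this rule an address of
#   storage001.monitoringvpn:9100
# yields
#   instance = "storage001"
# while an address without the ".monitoringvpn" suffix no longer matches,
# so its instance label is left untouched.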
logRetention = toString(config.services.private-storage.monitoring.policy.logRetentionSeconds) + "s";
in {
options.services.private-storage.monitoring.prometheus = {
......@@ -44,6 +45,7 @@ in {
services.prometheus = {
enable = true;
# port = 9090; # Option only in recent (20.09?) nixpkgs, 9090 default
retentionTime = logRetention;
scrapeConfigs = [
{
job_name = "node-exporters";
......
......@@ -17,7 +17,10 @@ let
#
# NOTE: This is promised by the service privacy policy. It *may not* be
# raised without following the process for updating the privacy policy.
max-incident-age = "29d";
# Fall back to 29d if the "monitoring" attribute is not available
# (currently the case in the system tests)
max-incident-age = toString(cfg.monitoring.policy.logRetentionSeconds or
(29 * (24 * 60 * 60))) + "s";
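# The `or` above is Nix's attribute-selection fallback: if any attribute
# along the path is missing, the expression after `or` is used instead.
# For instance, ({ }).monitoring.policy.logRetentionSeconds or (29 * 24 * 60 * 60)
# evaluates to 2505600, so the fallback renders as "2505600s".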
fqdn = "${
assert config.networking.hostName != null; config.networking.hostName
......@@ -67,6 +70,14 @@ in
The port number on which to service storage clients.
'';
};
services.private-storage.publicReadOnlyStoragePort = lib.mkOption
{ default = 8899;
type = lib.types.int;
example = 8099;
description = ''
The port number on which to service read-only storage clients.
'';
};
services.private-storage.issuerRootURL = lib.mkOption
{ default = "https://issuer.${config.networking.domain}/";
type = lib.types.str;
......@@ -96,11 +107,39 @@ in
# Define configuration based on values given for our options - starting with
# the option that says whether this is even turned on.
config = lib.mkIf cfg.enable
{ services.tahoe.nodes."${storage-node-name}" =
{
# A read-only storage service. This allows read-only access for clients
# that use Great Black Swamp. There is no ZKAP/GBS integration yet so
# this is the most we can do at the moment.
services.tahoe.nodes."ro-${storage-node-name}" =
{ package = cfg.tahoe.package;
# Each attribute in this set corresponds to a section in the tahoe.cfg
# file. Attributes on those sets correspond to individual assignments
# in those sections.
sections =
{ client = if cfg.introducerFURL == null then {} else
{ "introducer.furl" = cfg.introducerFURL;
};
node =
{ nickname = "ro-${storage-node-name}";
"tub.port" = "tcp:${toString cfg.publicReadOnlyStoragePort}";
"tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicReadOnlyStoragePort}";
};
storage =
{ enabled = true;
storage_dir = "/storage";
readonly = true;
force_foolscap = false;
};
};
};
# The Tahoe NixOS module brings along a single socket for the web API.
# That's for the other storage node, though; turn off the integration
# for this one.
systemd.services."tahoe.ro-storage".unitConfig.Requires = lib.mkForce [];
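# Under the option defaults above (and with no introducer fURL) the
# generated tahoe.cfg for this node would look roughly like the
# following; the nickname and address are placeholders:
#
#   [node]
#   nickname = ro-storage
#   tub.port = tcp:8899
#   tub.location = tcp:storage.example.invalid:8899
#
#   [storage]
#   enabled = true
#   storage_dir = /storage
#   readonly = true
#   force_foolscap = false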
services.tahoe.nodes."${storage-node-name}" =
{ package = cfg.tahoe.package;
# Each attribute in this set corresponds to a section in the
# tahoe.cfg file. Attributes on those sets correspond to individual
# assignments in those sections.
#
# We just populate this according to policy/preference of Private
# Storage.
......@@ -111,21 +150,20 @@ in
node =
# XXX Should try to pick a name that is unique across the grid.
{ nickname = "${storage-node-name}";
# We have the web port active because the CLI uses it. We may
# eventually turn this off, or at least have it off by default (with
# an option to turn it on). I don't know how much we'll use the CLI
# on the nodes. Maybe very little? Or maybe it will be part of a
# health check for the node... In any case, we tell it to bind to
# localhost so no one *else* can use it. And the principle of the
# web interface is that merely having access to it doesn't grant
# access to any data. It does grant access to storage capabilities
# but with our plugin configuration you still need ZKAPs to use
# those...
"web.port" = "tcp:3456:interface=127.0.0.1";
# We have the web port active because the CLI uses it and
# because it exposes a metrics endpoint for our monitoring
# system. The actual port configuration lives in systemd so
# that it can order binding the socket correctly with other
# dependencies (which we can't reliably do with Tahoe
# without a bunch of other work).
"web.port" = "systemd:domain=INET:index=0";
# We have to tell Tahoe-LAFS where to listen for Foolscap
# connections for the storage protocol. We have to tell it twice.
# First, in the syntax which it uses to listen.
"tub.port" = "tcp:${toString cfg.publicStoragePort}";
# Second, in the syntax it advertises to in the fURL.
"tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicStoragePort}";
};
......@@ -134,9 +172,9 @@ in
# Put the storage where we have a lot of space configured.
storage_dir = "/storage";
# Turn on our plugin.
plugins = "privatestorageio-zkapauthz-v1";
plugins = "privatestorageio-zkapauthz-v2";
};
"storageserver.plugins.privatestorageio-zkapauthz-v1" =
"storageserver.plugins.privatestorageio-zkapauthz-v2" =
{ "ristretto-issuer-root-url" = cfg.issuerRootURL;
"ristretto-signing-key-path" = cfg.ristrettoSigningKeyPath;
} // (
......@@ -148,7 +186,7 @@ in
};
# Let traffic destined for the storage node's Foolscap server through.
networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort ];
networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort cfg.publicReadOnlyStoragePort ];
systemd.tmpfiles.rules =
# Add a rule to prevent incident reports from accumulating indefinitely.
......@@ -160,6 +198,5 @@ in
# Provide a useful tool for reporting about shares.
ourpkgs.leasereport
];
};
}
# Provide secure defaults for systemd services
#
# Good reads:
# https://gist.github.com/ageis/f5595e59b1cddb1513d1b425a323db04
# https://docs.arbitrary.ch/security/systemd.html
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
{
DynamicUser = true;
# This set of restrictions is mostly derived from
# - running `systemd-analyze security zkap-spending-service.service`
# - looking at the restrictions from the NixOS nginx config.
AmbientCapabilities = "";
CapabilityBoundingSet = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateNetwork = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = "AF_UNIX";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
# Lines starting with "~" are deny-listed; the others are allow-listed.
# Since the first entry is an allow, it bounds the set of allowed
# syscalls and the further lines restrict it.
SystemCallFilter = [
# From systemd.exec(5), @system-service is "A reasonable set of
# system calls used by common system [...]"
"@system-service"
# This is from the nginx config, except that `@ipc` is not removed,
# since Twisted uses a self-pipe.
"~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid"
];
Umask = "0077";
}
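A service adopts these defaults by importing this file and merging its own
settings on top, as the spending service below does. A minimal sketch (the
service name is hypothetical):

    systemd.services.some-service.serviceConfig =
      (import ./restricted-service.nix) // {
        # Service-specific additions and overrides go here.
        RuntimeDirectory = "some-service";
      };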
# A NixOS module which can run a Ristretto-based issuer for PrivateStorage
# A NixOS module which can run a service tracking spending of ZKAPs.
# ZKAPs.
{ lib, pkgs, config, ourpkgs, ... }@args: let
cfg = config.services.private-storage-spending;
......@@ -40,70 +40,26 @@ in
wantedBy = [ "sockets.target" ];
listenStreams = [ cfg.unixSocket ];
};
# Add a systemd service to run zkap-spending-service.
systemd.services.zkap-spending-service = {
enable = true;
description = "ZKAP Spending Service";
wantedBy = [ "multi-user.target" ];
serviceConfig.NonBlocking = true;
serviceConfig = (import ./restricted-service.nix) // {
NonBlocking = true;
# It really shouldn't ever exit on its own! If it does, it's a bug
# we'll have to fix. Restart it and hope it doesn't happen too much
# before we can fix whatever the issue is.
serviceConfig.Restart = "always";
serviceConfig.Type = "simple";
# Use an unnamed user.
serviceConfig.DynamicUser = true;
Restart = "always";
Type = "simple";
serviceConfig = {
# Work around https://twistedmatrix.com/trac/ticket/10261
# Create a runtime directory so that the service has permission
# to change the mode on the socket.
RuntimeDirectory = "zkap-spending-service";
# This set of restrictions is mostly derived from
# - running `systemd-analyze security zkap-spending-service.service`
# - looking at the restrictions from the NixOS nginx config.
AmbientCapabilities = "";
CapabilityBoundingSet = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateNetwork = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = "AF_UNIX";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
# Lines starting with "~" are deny-listed; the others are allow-listed.
# Since the first entry is an allow, it bounds the set of allowed
# syscalls and the further lines restrict it.
SystemCallFilter = [
# From systemd.exec(5), @system-service is "A reasonable set of
# system calls used by common system [...]"
"@system-service"
# This is from the nginx config, except that `@ipc` is not removed,
# since twisted uses a self-pipe.
"~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid"
];
Umask = "0077";
};
script = let
......
......@@ -6,7 +6,7 @@
}: {
options = {
services.private-storage.sshUsers = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
type = lib.types.attrsOf (lib.types.listOf lib.types.str);
example = { root = [ "ssh-ed25519 AAA..." ]; };
description = ''
Users to configure on the issuer server and the storage servers and
......@@ -25,12 +25,9 @@
services.openssh = {
enable = true;
# We don't use SFTP for anything. No reason to expose it.
allowSFTP = false;
# We only allow key-based authentication.
challengeResponseAuthentication = false;
passwordAuthentication = false;
settings.KbdInteractiveAuthentication = false;
settings.PasswordAuthentication = false;
extraConfig = ''
# Possibly this is superfluous considering we don't allow
......@@ -44,9 +41,9 @@
};
users.users =
let makeUserConfig = username: sshPublicKey: {
let makeUserConfig = username: sshPublicKeys: {
isNormalUser = username != "root";
openssh.authorizedKeys.keys = [ sshPublicKey ];
openssh.authorizedKeys.keys = sshPublicKeys;
};
in builtins.mapAttrs makeUserConfig cfg.sshUsers;
};
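With the new type each user maps to a list of keys, so several keys can be
authorized for one account. A sketch (user names and key material are
illustrative):

    services.private-storage.sshUsers = {
      root = [ "ssh-ed25519 AAA..." ];
      ops = [ "ssh-ed25519 BBB..." "ssh-rsa CCC..." ];
    };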
......
......@@ -182,6 +182,17 @@ in
# Open up the firewall.
# networking.firewall.allowedTCPPorts = flip mapAttrsToList cfg.nodes
# (node: settings: settings.tub.port);
# Make systemd open a port for us:
# Systemd uses the socket name to link to the corresponding Service Unit.
systemd.sockets."tahoe.storage" = {
description = "Tahoe Web Server Socket";
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream = "127.0.0.1:3456";
};
};
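# Roughly, the generated tahoe.storage.socket unit looks like the
# following; the matching tahoe.storage service inherits the bound
# socket at index 0, which is what the "systemd:domain=INET:index=0"
# web.port endpoint selects (a sketch, not verbatim systemd output):
#
#   [Socket]
#   ListenStream=127.0.0.1:3456
#
#   [Install]
#   WantedBy=sockets.target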
systemd.services = flip mapAttrs' cfg.nodes (node: settings:
let
pidfile = "/run/tahoe.${lib.escapeShellArg node}.pid";
......@@ -191,10 +202,18 @@ in
eliotLog = "file:${nodedir}/logs/eliot.json,rotate_length=${toString (1024 * 1024 * 32)},max_rotated_files=32";
in nameValuePair "tahoe.${node}" {
description = "Tahoe LAFS node ${node}";
# We are partially socket activated, but only for the web API port.
# For the actual storage service port we bind ourselves, so make
# sure we actually do start up early in case storage requests come in.
wantedBy = [ "multi-user.target" ];
path = [ settings.package ];
restartTriggers = [
config.environment.etc."tahoe-lafs/${node}.cfg".source ];
# We don't know how to re-read our configuration file at runtime
# so restart if it ever changes.
restartTriggers = [ config.environment.etc."tahoe-lafs/${node}.cfg".source ];
serviceConfig = {
Type = "simple";
PIDFile = pidfile;
......@@ -202,8 +221,12 @@ in
# arguments to $(tahoe run). The node directory must come first,
# and arguments which alter Twisted's behavior come afterwards.
ExecStart = ''
${settings.package}/bin/tahoe --eliot-destination ${eliotLog} run ${nodedir} -n -l- --pidfile=${pidfile}
${settings.package}/bin/tahoe --eliot-destination ${eliotLog} run --allow-stdin-close ${nodedir} -n -l- --pidfile=${pidfile}
'';
# Twisted wants non-blocking sockets:
NonBlocking = true;
# The rlimit on number of open files controls how many
# connections a particular storage server can accept (factoring
# in the number of non-connection files the server needs open -
......@@ -240,6 +263,14 @@ in
# now. So it makes sense to have the limit be 2^15 right now.
LimitNOFILE = 32768;
};
unitConfig = {
# Our config doesn't know how to bind all of its sockets on its
# own so don't start without the systemd units that *do* know
# how to bind them.
Requires = [ "tahoe.${node}.socket" ];
};
preStart =
let
created = "${nodedir}.created";
......@@ -293,7 +324,7 @@ in
isSystemUser = true;
group = "tahoe.${node}";
});
users.groups = flip mapAttrs' cfg.introducers (node: _:
users.groups = flip mapAttrs' cfg.nodes (node: _:
nameValuePair "tahoe.${node}" {
});
})
......
......@@ -32,7 +32,7 @@ CHECKOUT="${HOME}/PrivateStorageio"
# This is the address of the git remote where we can get the latest
# PrivateStorageio.
REPO="https://whetstone.privatestorage.io/privatestorage/PrivateStorageio.git"
REPO="https://whetstone.private.storage/privatestorage/PrivateStorageio.git"
if [ -e "${CHECKOUT}" ]; then
# It exists already so just make sure it contains the latest changes from
......@@ -79,7 +79,11 @@ ssh -o StrictHostKeyChecking=no "$(hostname).$(domainname)" ":"
#
# So instead, import our nixpkgs which forces it to be instantiated in the
# store, then ask for its path, then set NIX_PATH to that.
export NIX_PATH="nixpkgs=$(nix eval "(import ${CHECKOUT}/nixpkgs.nix { }).path")"
# Two lines since 'export' masks 'set -e'
# See https://mywiki.wooledge.org/BashFAQ/105#line-204
NIX_PATH="nixpkgs=$(nix --extra-experimental-features nix-command eval --impure --expr "(import ${CHECKOUT}/nixpkgs.nix { }).path")"
export NIX_PATH
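# The pitfall being avoided: in `export VAR="$(cmd)"` the exit status of
# "$(cmd)" is swallowed by `export` (which almost always succeeds), so
# `set -e` would not abort on a failure. Assigning first keeps the
# failure visible (illustrative sketch):
#
#   VAR="$(might-fail)"   # `set -e` aborts here if might-fail fails
#   export VAR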
# Attempt to update just this host. Choose the morph grid definition matching
# the grid we belong to and limit the morph deployment update to the host
......