# A NixOS module which can instantiate a Tahoe-LAFS storage server in the
# preferred configuration for the Private Storage grid.
{ pkgs, ourpkgs, lib, config, ... }:
let
  # Grab the configuration for this module for convenient access below.
  cfg = config.services.private-storage;
  storage-node-name = "storage";
  # TODO: This path is copied from tahoe.nix.
  tahoe-base = "/var/db/tahoe-lafs";

  # The full path to the directory where the storage server will write
  # incident reports.
  incidents-dir = "${tahoe-base}/${storage-node-name}/logs/incidents";

  # The maximum age that will be allowed for incident reports.  See
  # tmpfiles.d(5) for the syntax.
  #
  # NOTE: This is promised by the service privacy policy.  It *may not* be
  # raised without following the process for updating the privacy policy.
  # Fall back to 29 days if the "monitoring" attribute is not available
  # (currently the case in the system tests).
  max-incident-age = toString(cfg.monitoring.policy.logRetentionSeconds or
                              (29 * (24 * 60 * 60))) + "s";
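  # For illustration: with the fallback, 29 * 24 * 60 * 60 = 2505600, so
  # max-incident-age evaluates to "2505600s".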

  fqdn = "${
    assert config.networking.hostName != null; config.networking.hostName
    }.${
    assert config.networking.domain != null; config.networking.domain
    }";
in
{
  imports = [
    # Load our tahoe-lafs module, which is configurable in the ways this
    # module needs.
    ./tahoe.nix
  ];

  options =
  { services.private-storage.enable = lib.mkEnableOption "private storage service";
    services.private-storage.tahoe.package = lib.mkOption
    { default = ourpkgs.privatestorage;
      type = lib.types.package;
      example = lib.literalExpression "pkgs.tahoelafs";
      description = ''
        The package to use for the Tahoe-LAFS daemon.
      '';
    };
    services.private-storage.publicAddress = lib.mkOption
    { default = "${fqdn}";
      type = lib.types.str;
      example = "storage.example.invalid";
      description = ''
        A publicly-visible address to use in Tahoe-LAFS advertisements for
        this storage service.
      '';
    };
    services.private-storage.introducerFURL = lib.mkOption
    { default = null;
      type = lib.types.nullOr lib.types.str;
      example = "pb://<tubid>@<location hint>/<swissnum>";
      description = ''
        A Tahoe-LAFS introducer node fURL at which this storage node should announce itself.
      '';
    };
    services.private-storage.publicStoragePort = lib.mkOption
    { default = 8898;
      type = lib.types.int;
      example = 8098;
      description = ''
        The port number on which to service storage clients.
      '';
    };
    services.private-storage.publicReadOnlyStoragePort = lib.mkOption
    { default = 8899;
      type = lib.types.int;
      example = 8099;
      description = ''
        The port number on which to service read-only storage clients.
      '';
    };
    services.private-storage.issuerRootURL = lib.mkOption
    { default = "https://issuer.${config.networking.domain}/";
      type = lib.types.str;
      example = "https://example.invalid/";
      description = ''
        The URL of the Ristretto issuer service to announce.
      '';
    };
    services.private-storage.ristrettoSigningKeyPath = lib.mkOption
    { type = lib.types.path;
      example = "/var/run/secrets/signing-key.private";
      description = ''
        The path to the Ristretto signing key for the service.
      '';
    };
    services.private-storage.passValue = lib.mkOption
    { default = null;
      type = lib.types.nullOr lib.types.int;
      example = lib.literalExpression "1000 * 1000";
      description = ''
        The bytes component of the bytes×time value of a single pass which
        storage servers will use when making pricing decisions.
      '';
    };
  };

  # Define configuration based on values given for our options - starting with
  # the option that says whether this is even turned on.
  config = lib.mkIf cfg.enable
    {
      # A read-only storage service.  This allows read-only access for clients
      # that use Great Black Swamp.  There is no ZKAP/GBS integration yet so
      # this is the most we can do at the moment.
      services.tahoe.nodes."ro-${storage-node-name}" =
        { package = cfg.tahoe.package;
          sections =
            { client = if cfg.introducerFURL == null then {} else
                { "introducer.furl" = cfg.introducerFURL;
                };
              node =
                { nickname = "ro-${storage-node-name}";
                  "tub.port" = "tcp:${toString cfg.publicReadOnlyStoragePort}";
                  "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicReadOnlyStoragePort}";
                };
              storage =
                { enabled = true;
                  storage_dir = "/storage";
                  readonly = true;
                  force_foolscap = false;
                };
            };
        };
      # The Tahoe NixOS module brings along a single socket for the web API.
      # That socket belongs to the other storage node, though, so turn off
      # the socket integration for this node.
      systemd.services."tahoe.ro-storage".unitConfig.Requires = lib.mkForce [];

      services.tahoe.nodes."${storage-node-name}" =
        { package = cfg.tahoe.package;
          # Each attribute in this set corresponds to a section in the
          # tahoe.cfg file.  Attributes on those sets correspond to individual
          # assignments in those sections.
          #
          # We just populate this according to policy/preference of Private
          # Storage.
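          #
          # A sketch of the mapping (the exact rendering is up to tahoe.nix):
          # an attribute set such as
          #
          #   { node = { nickname = "storage"; }; }
          #
          # becomes the tahoe.cfg fragment
          #
          #   [node]
          #   nickname = storage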
          sections =
            { client = if cfg.introducerFURL == null then {} else
                { "introducer.furl" = cfg.introducerFURL;
                };
              node =
                # XXX Should try to pick a nickname that is unique across
                # the grid.
                { nickname = storage-node-name;

                  # We have the web port active because the CLI uses it and
                  # because it exposes a metrics endpoint for our monitoring
                  # system.  The actual port configuration lives in systemd so
                  # that it can order binding the socket correctly with other
                  # dependencies (which we can't reliably do with Tahoe
                  # without a bunch of other work).
                  "web.port" = "systemd:domain=INET:index=0";

                  # We have to tell Tahoe-LAFS where to listen for Foolscap
                  # connections for the storage protocol.  We have to tell it twice.
                  # First, in the syntax which it uses to listen.
                  "tub.port" = "tcp:${toString cfg.publicStoragePort}";

                  # Second, in the syntax it advertises to in the fURL.
                  "tub.location" = "tcp:${cfg.publicAddress}:${toString cfg.publicStoragePort}";
                };
              storage =
                { enabled = true;
                  # Put the storage where we have a lot of space configured.
                  storage_dir = "/storage";
                  # Turn on our plugin.
                  plugins = "privatestorageio-zkapauthz-v2";
                };
              "storageserver.plugins.privatestorageio-zkapauthz-v2" =
                { "ristretto-issuer-root-url" = cfg.issuerRootURL;
                  "ristretto-signing-key-path" = cfg.ristrettoSigningKeyPath;
                } // (
                  if cfg.passValue == null
                  then {}
                  else { "pass-value" = (toString cfg.passValue); }
                );
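              # A sketch of the resulting tahoe.cfg section, assuming the
              # option examples above and a hypothetical pass-value of
              # 1000000:
              #
              #   [storageserver.plugins.privatestorageio-zkapauthz-v2]
              #   ristretto-issuer-root-url = https://example.invalid/
              #   ristretto-signing-key-path = /var/run/secrets/signing-key.private
              #   pass-value = 1000000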
            };
        };

      # Let traffic destined for the storage node's Foolscap server through.
      networking.firewall.allowedTCPPorts = [ cfg.publicStoragePort cfg.publicReadOnlyStoragePort ];

      systemd.tmpfiles.rules =
        # Add a rule to prevent incident reports from accumulating indefinitely.
        # See tmpfiles.d(5) for the syntax.
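        #
        # For illustration, with the fallback retention period the rule
        # expands to:
        #
        #   d /var/db/tahoe-lafs/storage/logs/incidents 0755 root root 2505600s -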
        [ "d ${incidents-dir} 0755 root root ${max-incident-age} -"
        ];

      environment.systemPackages = [
        # Provide a useful tool for reporting about shares.
        ourpkgs.leasereport
      ];
    };
}