Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • privatestorage/PrivateStorageio
  • tomprince/PrivateStorageio
2 results
Show changes
Commits on Source (1003)
Showing
with 963 additions and 39 deletions
# Define rules for a job that should run for events related to a merge request
# - merge request is opened, a new commit is pushed to its branch, etc. This
# definition does nothing by itself but can be referenced by jobs that want to
# run in this condition.
.merge_request_rules: &RUN_ON_MERGE_REQUEST
rules:
# If the pipeline is triggered by a merge request event then we should
# run.
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
# If the pipeline is triggered by anything else then we should not run.
- when: "never"
# As above, but rules for running when the scheduler triggers the pipeline.
.schedule_rules: &RUN_ON_SCHEDULE
rules:
# There are multiple schedules so make sure this one is for us. The
# `SCHEDULE_TARGET` variable is explicitly, manually set by us in the
# schedule configuration.
- if: '$SCHEDULE_TARGET != $CI_JOB_NAME'
when: "never"
# Make sure this is actually a scheduled run
- if: '$CI_PIPELINE_SOURCE != "schedule"'
when: "never"
# Conditions look good: run.
- when: "always"
stages:
- "build"
- "deploy"
default:
# Guide the choice of an appropriate runner for all these jobs.
# https://docs.gitlab.com/ee/ci/runners/#runner-runs-only-tagged-jobs
......@@ -5,29 +38,38 @@ default:
- "nixos"
- "shell"
variables:
# https://docs.gitlab.com/ee/ci/runners/configure_runners.html#job-stages-attempts
GET_SOURCES_ATTEMPTS: 10
docs:
<<: *RUN_ON_MERGE_REQUEST
stage: "build"
script:
- "nix-build docs.nix"
- "cp --recursive --no-preserve=mode result/docs/. docs/build/"
- "nix-build --attr docs --out-link result-docs"
# GitLab wants to lchown artifacts. It can't do that to store paths. Get
# a copy of the docs outside of the store.
- "cp --recursive --no-preserve=mode ./result-docs/docs ./docs-build/"
artifacts:
paths:
- "docs/build/"
- "./docs-build/"
expose_as: "documentation"
unit-tests:
stage: "test"
<<: *RUN_ON_MERGE_REQUEST
stage: "build"
script:
- "nix-shell --run 'nix-build nixos/unit-tests.nix' && cat result"
- "nix-build --attr unit-tests && cat result"
.morph-build: &MORPH_BUILD
stage: "test"
<<: *RUN_ON_MERGE_REQUEST
timeout: "3 hours"
stage: "build"
script:
- |
set -x
# GRID is set in one of the "instantiations" of this job template.
nix-shell --run "morph build --show-trace morph/grid/${GRID}/grid.nix"
nix-shell --pure --run "morph build --show-trace morph/grid/${GRID}/grid.nix"
morph-build-localdev:
......@@ -39,13 +81,17 @@ morph-build-localdev:
- |
# The local grid configuration is *almost* complete enough to build. It
# just needs this tweak.
sed -i 's/undefined/\"unundefined\"/' morph/grid/${GRID}/public-keys/users.nix
echo '{}' > morph/grid/${GRID}/public-keys/users.nix
morph-build-testing:
morph-build-staging:
<<: *MORPH_BUILD
variables:
GRID: "testing"
morph-build-hro-cloud:
<<: *MORPH_BUILD
variables:
GRID: "hro-cloud"
morph-build-production:
<<: *MORPH_BUILD
......@@ -54,7 +100,8 @@ morph-build-production:
vulnerability-scan:
stage: "test"
<<: *RUN_ON_MERGE_REQUEST
stage: "build"
script:
- "ci-tools/vulnerability-scan security-report.json"
- "ci-tools/count-vulnerabilities <security-report.json"
......@@ -65,31 +112,110 @@ vulnerability-scan:
system-tests:
stage: "test"
<<: *RUN_ON_MERGE_REQUEST
timeout: "3 hours"
stage: "build"
script:
- "nix-shell --run 'nix-build nixos/system-tests.nix'"
- "nix-shell --pure --run 'nix-build --attr system-tests'"
# A template for a job that can update one of the grids.
.update-grid: &UPDATE_GRID
stage: "deploy"
script: |
env --ignore-environment - NIX_PATH=$NIX_PATH GITLAB_USER_LOGIN=$GITLAB_USER_LOGIN CI_JOB_NAME=$CI_JOB_NAME CI_PIPELINE_SOURCE=$CI_PIPELINE_SOURCE CI_COMMIT_BRANCH=$CI_COMMIT_BRANCH ./ci-tools/update-grid-servers "${PRIVATESTORAGEIO_SSH_DEPLOY_KEY_PATH}" "${CI_ENVIRONMENT_NAME}"
# Update the staging deployment - only on a merge to the staging branch.
env --ignore-environment - \
NIX_PATH="$NIX_PATH" \
GITLAB_USER_LOGIN="$GITLAB_USER_LOGIN" \
CI_JOB_NAME="$CI_JOB_NAME" \
CI_PIPELINE_SOURCE="$CI_PIPELINE_SOURCE" \
CI_COMMIT_BRANCH="$CI_COMMIT_BRANCH" \
./ci-tools/update-grid-servers "${PRIVATESTORAGEIO_SSH_DEPLOY_KEY_PATH}" "${CI_ENVIRONMENT_NAME}"
# Update the staging deployment - only on a commit to the develop branch.
update-staging:
<<: *UPDATE_GRID
only:
- "staging"
# https://docs.gitlab.com/ee/ci/yaml/#rules
rules:
# https://docs.gitlab.com/ee/ci/yaml/index.html#rulesif
# https://docs.gitlab.com/ee/ci/jobs/job_control.html#cicd-variable-expressions
# https://docs.gitlab.com/ee/ci/variables/predefined_variables.html
- if: '$CI_COMMIT_BRANCH == "develop"'
environment:
# You can find some status information about environments in GitLab at
# https://whetstone.privatestorage.io/privatestorage/PrivateStorageio/-/environments.
name: "staging"
url: "https://privatestorage-staging.com/"
# Update the production deployment - only on a merge to the production branch.
# The URL controls where the "View Deployment" button for this environment
# will take you. The main website isn't controlled by this codebase so we
# don't point there. The monitoring system *is* controlled by this
# codebase and it also tells us lots of stuff about other things
# controlled by this codebase so that seems like a good place to land.
# Not that I make it a habit to visit the deployment using the GitLab
# button... Still, discoverability or something.
url: "https://monitoring.privatestorage-staging.com/"
# Update the production deployment - only on a commit to the production branch.
deploy-to-production:
<<: *UPDATE_GRID
only:
- "production"
# https://docs.gitlab.com/ee/ci/yaml/#rules
rules:
# https://docs.gitlab.com/ee/ci/yaml/index.html#rulesif
# https://docs.gitlab.com/ee/ci/jobs/job_control.html#cicd-variable-expressions
# https://docs.gitlab.com/ee/ci/variables/predefined_variables.html
- if: '$CI_COMMIT_BRANCH == "production"'
environment:
# See notes in `update-staging`.
name: "production"
url: "https://privatestorage.io/"
url: "https://monitoring.private.storage/"
# Update the hro-cloud deployment - only on a commit to the hro-cloud branch.
deploy-to-hro-cloud:
<<: *UPDATE_GRID
# https://docs.gitlab.com/ee/ci/yaml/#rules
rules:
# https://docs.gitlab.com/ee/ci/yaml/index.html#rulesif
# https://docs.gitlab.com/ee/ci/jobs/job_control.html#cicd-variable-expressions
# https://docs.gitlab.com/ee/ci/variables/predefined_variables.html
- if: '$CI_COMMIT_BRANCH == "hro-cloud"'
environment:
# See notes in `update-staging`.
name: "hro-cloud"
url: "https://monitoring.deerfield.leastauthority.com/"
update-nixpkgs:
<<: *RUN_ON_SCHEDULE
stage: "build"
script:
- |
./ci-tools/with-ssh-agent \
./ci-tools/update-nixpkgs \
"$CI_SERVER_URL" \
"$CI_SERVER_HOST" \
"$CI_PROJECT_PATH" \
"$CI_PROJECT_ID" \
"$CI_DEFAULT_BRANCH"
update-production:
<<: *RUN_ON_SCHEDULE
stage: "build"
script:
- |
./ci-tools/update-production \
"$CI_SERVER_URL" \
"$CI_PROJECT_ID" \
"develop" \
"production"
update-hro-cloud:
<<: *RUN_ON_SCHEDULE
stage: "build"
script:
- |
./ci-tools/update-production \
"$CI_SERVER_URL" \
"$CI_PROJECT_ID" \
"develop" \
"hro-cloud"
Deployment notes
================
- 2023-06-19
ZKAPAuthorizer's Tahoe-LAFS plugin name changed from "privatestorageio-zkapauthz-v1" to "privatestorageio-zkapauthz-v2".
This causes Tahoe-LAFS to use a different filename to persist the plugin's Foolscap fURL.
To preserve the original fURL value (required) each storage node needs this command run before the deployment::
cp /var/db/tahoe-lafs/storage/private/storage-plugin.privatestorageio-zkapauthz-v{1,2}.furl
- 2023-04-19
The team switched from Slack to Zulip.
For the monitoring notifications to reach Zulip, a webhook bot has to be created in Zulip and a secret URL has to be constructed as described in `https://zulip.com/integrations/doc/grafana`_ and added to the ``private_keys`` directory (See ``grid/local/private-keys/grafana-zulip-url`` for an example).
Find the secret URL for production at `https://my.1password.com/vaults/7flqasy5hhhmlbtp5qozd3j4ga/allitems/rb22ipb6gvokohzq2d2hhv6t6u`_.
- 2021-12-20
`https://whetstone.private.storage/privatestorage/privatestorageops/-/issues/399`_ requires moving the PaymentServer database on the ``payments`` host onto a new dedicated filesystem.
Follow these steps *before* deploying this version of PrivateStorageio:
0. Deploy the `PrivateStorageOps change <https://whetstone.private.storage/privatestorage/privatestorageops/-/merge_requests/169>`_ that creates a new dedicated volume.
1. Put a disk label on the new dedicated volume ::
nix-shell -p parted --run 'parted /dev/nvme1n1 mklabel msdos'
2. Put a properly aligned partition in the new disk label ::
nix-shell -p parted --run 'parted /dev/nvme1n1 mkpart primary ext2 4096s 4G'
3. Create a labeled filesystem on the partition ::
mkfs.ext4 -L zkapissuer-data /dev/nvme1n1p1
4. Deploy the PrivateStorageio update.
5. Move the database file to the new location ::
mv -iv /var/lib/zkapissuer/vouchers.sqlite3 /var/lib/zkapissuer-v2
6. Clean up the old state directory ::
rm -ir /var/lib/zkapissuer
7. Start the PaymentServer service (not running because its path assertions were not met earlier) ::
systemctl start zkapissuer
- 2021-10-12 The secret in ``private-keys/grafana-slack-url`` needs to be changed to remove the ``SLACKURL=`` prefix.
- 2021-09-30 `Enable alerting <https://whetstone.private.storage/privatestorage/PrivateStorageio/-/merge_requests/185>`_ needs a secret in ``private-keys/grafana-slack-url`` looking like the template in ``morph/grid/local/private-keys/grafana-slack-url`` and pointing to the secret API endpoint URL saved in `this 1Password entry <https://privatestorage.1password.com/vaults/7flqasy5hhhmlbtp5qozd3j4ga/allitems/cgznskz2oix2tyx5xyntwaos5i>`_ (or create a new secret URL at https://www.slack.com/apps/A0F7XDUAZ).
- 2021-09-07 `Manage access to payment metrics <https://whetstone.private.storage/privatestorage/PrivateStorageio/-/merge_requests/146>`_ requires moving and chown'ing the PaymentServer database on the ``payments`` host::
mkdir /var/lib/zkapissuer
mv /var/db/vouchers.sqlite3 /var/lib/zkapissuer/vouchers.sqlite3
chown -R zkapissuer:zkapissuer /var/lib/zkapissuer
chmod 750 /var/lib/zkapissuer
chmod 640 /var/lib/zkapissuer/vouchers.sqlite3
Project Hosting Moved
=====================
This project can now be found at https://whetstone.privatestorage.io/opensource/PrivateStorageio
PrivateStorageio
================
......@@ -13,12 +8,13 @@ Documentation
There is documentation for:
* Operators/Admins: ``docs/ops/README.rst``
* Developers: ``docs/dev/README.rst``
* Operators/Admins: `<docs/source/ops/README.rst>`_
* Developers: `<docs/source/dev/README.rst>`_
The documentation can be built using this command::
$ nix-build docs.nix
The documentation is also built on and published by CI.
The documentation is also built on and published by CI:
Navigate to the `list of finished jobs <https://whetstone.private.storage/privatestorage/PrivateStorageio/-/jobs>`_ and download the artefact of the latest ``docs`` build.
#!/usr/bin/env bash
# Provision the Stripe objects for selling "30 GB-months" of storage-time:
# create a product, attach a price to it, create a shareable payment link,
# and echo each resulting identifier.
#
# Usage: <script> <stripe-secret-key> <domain>
set -euo pipefail
# Stripe secret API key; used as the basic-auth username on every API call.
KEY=$1
shift
# The domain to advertise on the product and to redirect to after payment.
DOMAIN=$1
shift
# Create the product.  `jp --unquoted id` pulls the bare `id` field out of
# the JSON response.
PRODUCT_ID=$(
curl https://api.stripe.com/v1/products \
-u "${KEY}:" \
-d "name=30 GB-months" \
-d "description=30 GB-months of Private.Storage storage × time" \
-d "statement_descriptor=PRIVATE STORAGE" \
-d "url=https://${DOMAIN}/" |
jp --unquoted id
)
echo "Product: $PRODUCT_ID"
# Create a price for the product.  Stripe's `unit_amount` is in the
# currency's smallest unit, so 650 is USD 6.50; tax is added on top
# ("exclusive") rather than included.
PRICE_ID=$(
curl https://api.stripe.com/v1/prices \
-u "${KEY}:" \
-d "currency=USD" \
-d "unit_amount=650" \
-d "tax_behavior=exclusive" \
-d "product=${PRODUCT_ID}" |
jp --unquoted id
)
echo "Price: $PRICE_ID"
# Create a payment link selling exactly one unit of that price, sending the
# customer to our success page after checkout completes.
LINK_URL=$(
curl https://api.stripe.com/v1/payment_links \
-u "${KEY}:" \
-d "line_items[0][price]=${PRICE_ID}" \
-d "line_items[0][quantity]=1" \
-d "after_completion[type]"=redirect \
-d "after_completion[redirect][url]"="https://${DOMAIN}/payment/success" |
jp --unquoted url
)
echo "Payment link: $LINK_URL"
#!/usr/bin/env bash
# Register a Stripe webhook endpoint pointing at the payments host for the
# given domain, subscribed to checkout completion events.
#
# Usage: <script> <stripe-secret-key> <domain>
set -euo pipefail
# Stripe secret API key; used as the basic-auth username on the API call.
KEY=$1
shift
# The grid's domain; the webhook is served at payments.<domain>.
DOMAIN=$1
shift
# Only subscribe to checkout.session.completed - the one event the webhook
# endpoint consumes.
curl \
https://api.stripe.com/v1/webhook_endpoints \
-u "${KEY}:" \
-d url="https://payments.${DOMAIN}/v1/stripe/webhook" \
-d "enabled_events[]"="checkout.session.completed"
monitoring.deerfield.leastauthority.com,vps-50812a54.vps.ovh.net,51.38.134.175 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOIgegzAxXPhxFK8vglBlUAFTzUoCj5TxqcLS57NaL2l
payments.deerfield.leastauthority.com,vps-3cbcf174.vps.ovh.net,217.182.78.151 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFI32csriKoUUD3e813gcEAD5CCuf8rUnary70HfJMSr
storage001.deerfield.leastauthority.com,185.225.209.174 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKX9Ei+WdNVvIncHQZ9CdEXZeSj2zBM/NQEuqmMbep0A
storage002.deerfield.leastauthority.com,38.170.241.34 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK3TAQj5agAv9AOZQhE95vATQKcNbNZj5Y3xMb5cjzGZ
storage003.deerfield.leastauthority.com,ns3728736.ip-151-80-28.eu,151.80.28.108 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFsh9No4PT3hHDsY/07kDSRCg1Jse38n7GY0Rk9DnyPe
monitoring.privatestorage-staging.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINI9kvEBaOMvpWqcFH+6nFvRriBECKB4RFShdPiIMkk9
payments.privatestorage-staging.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK0eO/01VFwdoZzpclrmu656eaMkE19BaxtDdkkFHMa8
storage001.privatestorage-staging.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFP8L6OHCxq9XFd8ME8ZrCbmO5dGZDPH8I5dm0AwSGiN
storage001.privatestorage-staging.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA6iWHO9/4s3h9VIpaxgD+rgj/OQh8+jupxBoOmie3St
......@@ -61,6 +61,11 @@ update_grid_nodes() {
domain=private.storage
;;
"hro-cloud")
grid_dir=./morph/grid/hro-cloud
domain=deerfield.leastauthority.com
;;
"staging")
grid_dir=./morph/grid/testing
domain=privatestorage-staging.com
......@@ -74,7 +79,7 @@ update_grid_nodes() {
# Find the names of all hosts that belong to this grid. This list includes
# one extra string, "network", which is morph configuration stuff and we need
# to filter out later.
nodes=$(nix eval --json "(builtins.concatStringsSep \" \" (builtins.attrNames (import $grid_dir/grid.nix)))" | jp --unquoted @)
nodes=$(nix --extra-experimental-features nix-command eval --impure --json --expr "(builtins.concatStringsSep \" \" (builtins.attrNames (import $grid_dir/grid.nix)))" | jp --unquoted @)
# Tell every server in the network to update itself.
for node in ${nodes}; do
......
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p git curl python3
# ^^
# we need git to commit and push our changes
# we need curl to create the gitlab MR
# we need python to format the data as json
set -eux -o pipefail
# Entry point.  Clones the repo onto a fresh dated branch, updates the
# nixpkgs pin, builds the grids before and after to compute a closure
# diff, then pushes the branch and opens a merge request carrying that
# diff as its description.
main() {
# This is a base64-encoded OpenSSH-format SSH private key that we can use
# to push and pull with git over ssh.
local SSHKEY=$1
shift
# This is a GitLab authentication token we can use to make API calls onto
# GitLab.
local TOKEN=$1
shift
# This is the URL of the root of the GitLab API.
local SERVER_URL=$1
shift
# This is the hostname of the GitLab server (suitable for use in a Git
# remote).
local SERVER_HOST=$1
shift
# This is the "group/project"-style identifier for the project we're working
# with.
local PROJECT_PATH=$1
shift
# The GitLab id of the project (eg, from CI_PROJECT_ID in the CI
# environment).
local PROJECT_ID=$1
shift
# The name of the branch on which to base changes and which to target with
# the resulting merge request.
local DEFAULT_BRANCH=$1
shift
# Only proceed if we have an ssh-agent.
check_agent
# Pick a branch name into which to push our work.  Dated, so runs on
# different days use distinct branches.
local SOURCE_BRANCH="nixpkgs-upgrade-$(date +%Y-%m-%d)"
setup_git
checkout_source_branch "$SSHKEY" "$SERVER_HOST" "$PROJECT_PATH" "$DEFAULT_BRANCH" "$SOURCE_BRANCH"
# Snapshot the build result prior to the update so we have a baseline to
# diff the updated build against.
build "result-before"
# If nothing changed, report this and exit without an error.
if ! update_nixpkgs; then
echo "No changes."
exit 0
fi
build "result-after"
# NOTE(review): `local VAR=$(...)` masks the command's exit status, so
# under `set -e` a compute_diff failure would pass unnoticed here.
local DIFF=$(compute_diff "./result-before" "./result-after")
commit_and_push "$SSHKEY" "$SOURCE_BRANCH" "$DIFF"
create_merge_request "$SERVER_URL" "$TOKEN" "$PROJECT_ID" "$DEFAULT_BRANCH" "$SOURCE_BRANCH" "$DIFF"
}
# Add the ssh key required to push and (maybe) pull to the ssh-agent. This
# may have a limited lifetime in the agent so operations that are going to
# require the key should refresh it immediately before starting.
refresh_ssh_key() {
# The private key, base64-encoded.
local KEY_BASE64=$1
shift
# A GitLab CI/CD variable set for us to use.
# Decode and feed the key to ssh-add on stdin so it never touches disk.
# NOTE(review): this script runs under `set -x`, so this pipeline -
# including the encoded key - is traced to the job log; confirm that is
# acceptable.
echo "${KEY_BASE64}" | base64 -d | ssh-add -
}
# Make git usable by setting some global mandatory options.
setup_git() {
# We may not know the git/ssh server's host key yet. In that case, learn
# it and proceed.  `accept-new` trusts a host key on first contact but
# still rejects a *changed* key for an already-known host.
export GIT_SSH_COMMAND="ssh -o StrictHostKeyChecking=accept-new"
# Commits need an author identity; use the bot's.
git config --global user.email "update-bot@private.storage"
git config --global user.name "Update Bot"
}
# Abort the whole script if no ssh-agent is detected.
check_agent() {
    # The ssh private key is handed to us in memory and we never want it
    # on disk, so a running ssh-agent is the only reasonable place to
    # stage it for the git/ssh operations that follow.  An agent
    # advertises itself through the SSH_AUTH_SOCK environment variable.
    if [ -v SSH_AUTH_SOCK ]; then
        return 0
    fi
    echo "ssh-agent is required but missing, aborting."
    exit 1
}
# Make a fresh clone of the repository, make it our working directory, and
# check out the branch we intend to commit to (the "source" of the MR).
# Note: the `cd` below persists for the rest of the script - every later
# git/nix operation runs inside the clone.
checkout_source_branch() {
# Base64-encoded ssh key, forwarded to refresh_ssh_key for the fetch.
local SSHKEY=$1
shift
# Hostname of the git server, for building the remote URL.
local SERVER_HOST=$1
shift
# "group/project" path of the repository on that server.
local PROJECT_PATH=$1
shift
# The branch we'll start from.
local DEFAULT_BRANCH=$1
shift
# The name of our branch.
local BRANCH=$1
shift
# To avoid messing with the checkout we're running from (which GitLab
# tends to like to share across builds) clone it to a new temporary path.
git clone . working-copy
cd working-copy
# Make sure we know the name of a remote that points at the right place.
# Then use it to make sure the base branch is up-to-date. It usually
# should be already but in case it isn't we don't want to start from a
# stale revision.
git remote add upstream gitlab@"$SERVER_HOST":"$PROJECT_PATH".git
refresh_ssh_key "$SSHKEY"
git fetch upstream "$DEFAULT_BRANCH"
# Typically this tool runs infrequently enough that the branch doesn't
# already exist. However, as a convenience for developing on this tool
# itself, if it does already exist, wipe it and start fresh for greater
# predictability.
git branch -D "${BRANCH}" || true
# Then create a new branch starting from the mainline development branch.
git checkout -B "${BRANCH}" upstream/"$DEFAULT_BRANCH"
}
# Build all of the grids (the `morph` attribute of `default.nix`) and link the
# result to the given parameter. This will give us some material to diff.
build() {
# The name of the nix result symlink.
local RESULT=$1
shift
# The local grid can only build if you populate its users.
# NOTE(review): this overwrites a file in the working copy; if the tracked
# users.nix does not already contain '{}', the change will register in the
# `git diff` check in update_nixpkgs and be swept up by `git commit -am`
# in commit_and_push - confirm that is intended.
echo '{}' > morph/grid/local/public-keys/users.nix
nix-build -A morph -o "$RESULT"
}
# Perform the actual dependency update. If there are no changes, exit with an
# error code.
update_nixpkgs() {
# Spawn *another* nix-shell that has the *other* update-nixpkgs tool.
# Should sort out this mess sooner rather than later... Also, tell the
# tool (running from another checkout) to operate on this clone's package
# file instead of the one that's part of its own checkout.
# The single quotes are deliberate: ${PWD} is expanded by the shell that
# nix-shell spawns, inside the clone, not by this script.
nix-shell ../shell.nix --run 'update-nixpkgs ${PWD}/nixpkgs.json'
# Signal a kind of error if we did nothing (expected in the case where
# nixpkgs hasn't changed since we last ran).
# `git diff --exit-code` succeeds (0) when the tree is clean, so a clean
# tree maps to our "no changes" return code of 1.
if git diff --exit-code; then
return 1
fi
}
# Return a description of the package changes resulting from the dependency
# update.
compute_diff() {
# Path of the pre-update nix result symlink.
local LEFT=$1
shift
# Path of the post-update nix result symlink.
local RIGHT=$1
shift
# Summarize version/size changes between the two closures.  The feature
# flag is needed because `nix store` is part of the newer nix CLI.
nix --extra-experimental-features nix-command store diff-closures "$LEFT" "$RIGHT"
}
# Commit and push all changes in the working tree along with a description of
# the package changes.
commit_and_push() {
# Base64-encoded ssh key, refreshed immediately before the push.
local SSHKEY=$1
shift
# The branch to push (created earlier by checkout_source_branch).
local BRANCH=$1
shift
# The closure-diff text to embed (fenced) in the commit message.
local DIFF=$1
shift
# NOTE(review): `-a` commits *all* modified tracked files, not just
# nixpkgs.json - eg the users.nix stub written by build() - confirm that
# is intended.
git commit -am "bump nixpkgs
```
$DIFF
```
"
refresh_ssh_key "$SSHKEY"
# Force-push: the branch may exist from an earlier same-day run and was
# recreated from scratch in checkout_source_branch.
git push --force upstream "${BRANCH}:${BRANCH}"
}
# Create a GitLab MR for the branch we just pushed, including a description of
# the package changes it implies.
create_merge_request() {
# The URL of the root of the GitLab API.
local SERVER_URL=$1
shift
# A GitLab API token authorized to create merge requests.
local TOKEN=$1
shift
# The GitLab id of the project.
local PROJECT_ID=$1
shift
# The target branch of the MR.
local TARGET_BRANCH=$1
shift
# The source branch of the MR.
local SOURCE_BRANCH=$1
shift
# The closure-diff text for the MR description.
local DIFF=$1
shift
# Build the JSON request body with Python so all the quoting/escaping in
# the diff text is handled correctly.
local BODY=$(python3 -c '
import sys, json, re
def rewrite_escapes(s):
# `nix store diff-closures` output is fancy and includes color codes and
# such. That looks a bit less than nice in a markdown-formatted comment so
# strip all of it. If we wanted to be fancy we could rewrite it in a
# markdown friendly way (eg using html).
return re.sub(r"\x1b\[[^m]*m", "", s)
print(json.dumps({
"id": sys.argv[1],
"target_branch": sys.argv[2],
"source_branch": sys.argv[3],
"remove_source_branch": True,
"title": "bump nixpkgs version",
"description": f"```\n{rewrite_escapes(sys.argv[4])}\n```",
}))
' "$PROJECT_ID" "$TARGET_BRANCH" "$SOURCE_BRANCH" "$DIFF")
# POST to the GitLab merge-requests API; the token is carried in the
# PRIVATE-TOKEN header.
curl --verbose -X POST --data "${BODY}" --header "Content-Type: application/json" --header "PRIVATE-TOKEN: ${TOKEN}" "${SERVER_URL}/api/v4/projects/${PROJECT_ID}/merge_requests"
}
# Pull the private ssh key and GitLab token from the environment here so we
# can work with them as arguments everywhere else. They're passed to us in
# the environment because *maybe* this is *slightly* safer than passing them
# in argv.  (`set -u` above makes this fail loudly if either is missing.)
SSHKEY="$UPDATE_NIXPKGS_PRIVATE_SSHKEY_BASE64"
TOKEN="$UPDATE_NIXPKGS_PRIVATE_TOKEN"
# Before proceeding, remove the secrets from our environment so we don't pass
# them to child processes - none of which need them.
unset UPDATE_NIXPKGS_PRIVATE_SSHKEY_BASE64 UPDATE_NIXPKGS_PRIVATE_TOKEN
# NOTE(review): `set -x` is in effect, so this call - secrets included -
# is traced to the job log; consider disabling tracing around it.
main "$SSHKEY" "$TOKEN" "$@"
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p git curl python3
set -eux -o pipefail
# Entry point: open a merge request proposing to merge SOURCE_BRANCH into
# TARGET_BRANCH, described by the deployment notes and MRs it carries.
main() {
# A GitLab API token authorized to create merge requests.
local TOKEN=$1
shift
# The URL of the root of the GitLab API.
local SERVER_URL=$1
shift
# The GitLab id of the project.
local PROJECT_ID=$1
shift
# The branch whose changes we want to deploy (eg "develop").
local SOURCE_BRANCH=$1
shift
# The deployment branch to merge them into (eg "production").
local TARGET_BRANCH=$1
shift
# Make sure the things we want to talk about are locally known. GitLab
# seems to prefer to know about as few refs as possible.
checkout_git_ref "$SOURCE_BRANCH"
checkout_git_ref "$TARGET_BRANCH"
# If there have been no changes we'll just abandon this update.
if ! ensure_changes "$SOURCE_BRANCH" "$TARGET_BRANCH"; then
echo "No changes."
exit 0
fi
local NOTES=$(describe_update "$SOURCE_BRANCH" "$TARGET_BRANCH")
create_merge_request "$TOKEN" "$SERVER_URL" "$PROJECT_ID" "$SOURCE_BRANCH" "$TARGET_BRANCH" "$NOTES"
}
# Fetch REF from origin so later rev-parse/log/diff calls can see it.
# (Despite the name, this only fetches; it does not switch branches.)
checkout_git_ref() {
local REF=$1
shift
git fetch origin "$REF"
}
# Succeed when the two branches point at different commits; return
# non-zero when they are identical and there is nothing to merge.
ensure_changes() {
    local FROM_BRANCH=$1
    shift
    local INTO_BRANCH=$1
    shift
    local from_rev=$(git rev-parse origin/"$FROM_BRANCH")
    local into_rev=$(git rev-parse origin/"$INTO_BRANCH")
    # The test's own status is the function's status.
    [ "$from_rev" != "$into_rev" ]
}
# Print the "group/project!N" style merge request reference recorded in
# the given merge commit's message.  GitLab merge commits carry a line of
# the form "See merge request <reference>"; extract just the reference,
# stripping all whitespace (including the trailing newline).
describe_merge_request() {
    # The merge revision to inspect.  The original read the caller's
    # dynamically-scoped $rev instead of its own argument - which only
    # worked because bash `local` variables are visible to callees; bind
    # the argument explicitly so the function stands on its own.
    local rev=$1
    shift
    git show "$rev" | grep 'See merge request' | sed -e 's/See merge request //' | tr -d '[:space:]'
}
# Print a markdown bullet list with one merge-request reference per merge
# commit found in RANGE that was merged into TARGET.
describe_merge_requests() {
    # A git revision range (eg origin/production..origin/develop) to scan.
    local RANGE=$1
    shift
    # The branch the merges were made into; used as a filter because the
    # merge commit subjects read "... into '<branch>'".
    local TARGET=$1
    shift
    # Find all of the relevant merge revisions
    local onelines=$(git log --merges --first-parent -m --oneline "$RANGE" | grep "into '$TARGET'")
    # Describe each merge revision.  Split $onelines on newlines only so
    # each log line is one loop iteration despite containing spaces.
    local IFS=$'\n'
    for line in $onelines; do
        local rev=$(echo "$line" | cut -d ' ' -f 1)
        echo -n "* "
        # Quote the revision (the original passed it unquoted, exposing it
        # to word-splitting and glob expansion).
        describe_merge_request "$rev"
        echo
    done
}
# Compose the markdown body for the deployment MR: the DEPLOYMENT-NOTES
# diff between the branches, the list of included MRs, and a diffstat.
describe_update() {
# The branch whose changes are being deployed (eg "develop").
local SOURCE_BRANCH=$1
shift
# The deployment branch being updated (eg "production").
local TARGET_BRANCH=$1
shift
# Since the target (production or hro-cloud) should not diverge from the source
# (develop) it is fine to use `..` instead of `...` in the git ranges here.
# `...` encounters problems related to discovering the merge base because
# of the way GitLab manages the git checkout on CI (I think).
local NOTES=$(git diff origin/"$TARGET_BRANCH"..origin/"$SOURCE_BRANCH" -- DEPLOYMENT-NOTES.rst)
# There often are no notes and that makes for boring reading so toss in a
# diffstat as well.
local DIFFSTAT=$(git diff --stat origin/"$TARGET_BRANCH"..origin/"$SOURCE_BRANCH")
# Committer date (ISO 8601) of the target branch's tip - roughly when the
# target was last updated.
local WHEN=$(git log --max-count=1 --format='%cI' origin/"$TARGET_BRANCH")
# Describe all of the MRs that were merged into the source branch that are
# about to be merged into the target branch.  The filter argument is
# $SOURCE_BRANCH because those merges were made *into* the source branch.
local MR=$(describe_merge_requests origin/"$TARGET_BRANCH"..origin/"$SOURCE_BRANCH" "$SOURCE_BRANCH")
echo "\
Changes from $SOURCE_BRANCH since $WHEN
=======================================
Deployment Notes
----------------
\`\`\`
$NOTES
\`\`\`
Included Merge Requests
-----------------------
$MR
Diff Stat
---------
\`\`\`
$DIFFSTAT
\`\`\`
"
}
# Create the GitLab MR proposing the deployment update.
create_merge_request() {
# A GitLab API token authorized to create merge requests.
local TOKEN=$1
shift
# The URL of the root of the GitLab API.
local SERVER_URL=$1
shift
# The GitLab id of the project.
local PROJECT_ID=$1
shift
# The source branch of the MR.
local SOURCE_BRANCH=$1
shift
# The target branch of the MR.
local TARGET_BRANCH=$1
shift
# The markdown description for the MR body.
local NOTES=$1
shift
# Build the JSON request body with Python so quoting/escaping in the
# notes text is handled correctly.
local BODY=$(python3 -c '
import sys, json
print(json.dumps({
"id": sys.argv[1],
"source_branch": sys.argv[2],
"target_branch": sys.argv[3],
"remove_source_branch": True,
"title": f"update {sys.argv[3]}",
"description": sys.argv[4],
}))
' "$PROJECT_ID" "$SOURCE_BRANCH" "$TARGET_BRANCH" "$NOTES")
# POST to the GitLab merge-requests API; the token is carried in the
# PRIVATE-TOKEN header.
curl --verbose -X POST --data "${BODY}" --header "Content-Type: application/json" --header "PRIVATE-TOKEN: ${TOKEN}" "${SERVER_URL}/api/v4/projects/${PROJECT_ID}/merge_requests"
}
# Pull the GitLab token from the environment here so we can work with them as
# arguments everywhere else. They're passed to us in the environment because
# *maybe* this is *slightly* safer than passing them in argv.
#
# The name is slightly weird because it is shared with the update-nixpkgs job.
TOKEN="$UPDATE_NIXPKGS_PRIVATE_TOKEN"
# Before proceeding, remove the secrets from our environment so we don't pass
# them to child processes - none of which need them.
unset UPDATE_NIXPKGS_PRIVATE_TOKEN
# NOTE(review): `set -x` is in effect, so this call - token included - is
# traced to the job log; consider disabling tracing around it.
main "$TOKEN" "$@"
......@@ -32,6 +32,12 @@ else
fi
'
# The version (1.9.6) of vulnix in nixos-21.05 incorrectly collapses
# derivations with the same name+version, but different sets of patches
# applied. Therefore, we use a recent nixos-unstable version that has a newer
# version of vulnix included.
export NIX_PATH=nixpkgs=https://api.github.com/repos/NixOS/nixpkgs/tarball/ee084c02040e864eeeb4cf4f8538d92f7c675671
# vulnix exits with an error status if there are vulnerabilities. We told
# GitLab to allow this by setting `allow_failure` to true in the GitLab CI
# config. vulnix exit status indicates what vulnix thinks happened. If we
......
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p openssh
# This minimal helper just runs another process with an ssh-agent available to
# it. ssh-agent itself does most of that work for us so the main benefit of
# the script is that it guarantees ssh-agent is available for us to run.
# Just give ssh-agent the command and it will run it and then exit when it
# does. This is a nice way to do process management so as to avoid leaking
# ssh-agents. Just in case cleanup fails for some reason, we'll also give
# keys a lifetime with `-t <seconds>` so secrets don't stay in memory
# indefinitely. Note this means the process run by ssh-agent must finish its
# key-requiring operation within this number of seconds of adding the key.
ssh-agent -t 30 "$@"
# Top-level attribute set of everything this project can build; each
# attribute is buildable with `nix-build --attr <name>`.
{ pkgs ? import ./nixpkgs.nix { } }:
{
# Render the project documentation source to some presentation format (ie,
# html) with Sphinx.
docs = pkgs.callPackage ./docs.nix { };
# Run some system integration tests in VMs covering some of the software
# we're integrating (ie, application functionality).
system-tests = pkgs.callPackage ./nixos/system-tests.nix { };
# Run some unit tests of the Nix that ties all of these things together (ie,
# PrivateStorageio-internal library functionality).
unit-tests = pkgs.callPackage ./nixos/unit-tests.nix { };
# Build all grids into a single derivation. The derivation also has several
# attributes that are useful for exploring the configuration in a repl or
# with eval.
morph = pkgs.callPackage ./morph {};
}
# Evaluate the project expression against the pinned package set defined
# by nixpkgs-2105.nix.
{ pkgs ? import ./nixpkgs-2105.nix { } }:
pkgs.callPackage ./privatestorageio.nix { }
# Build the project's Sphinx documentation.
# NOTE(review): the `sphinx` argument appears unused here - the Sphinx the
# build actually runs comes from `pyenv` (ps.sphinx) - confirm and drop it.
{ stdenv, lib, graphviz, plantuml, python3, sphinx }:
let
# A Python environment carrying Sphinx plus the PlantUML extension the
# docs' UML diagrams require.
pyenv = python3.withPackages (ps: [ ps.sphinx ps.sphinxcontrib_plantuml ]);
in
stdenv.mkDerivation rec {
version = "0.0";
name = "privatestorageio-${version}";
src = lib.cleanSource ./.;
# Only unpack and build; there is nothing to configure, install, or fix up.
phases = [ "unpackPhase" "buildPhase" ];
# Diagram-rendering tools needed at build time.
depsBuildBuild = [
graphviz
plantuml
];
# -W turns Sphinx warnings into errors so broken docs fail the build.
buildPhase = ''
${pyenv}/bin/sphinx-build -W docs/ $out/docs
'';
}
File moved
......@@ -20,7 +20,7 @@
# -- Project information -----------------------------------------------------
project = 'PrivateStorageio'
copyright = '2019, PrivateStorage.io, LLC'
copyright = '2021, PrivateStorage.io, LLC'
author = 'PrivateStorage.io, LLC'
# The short X.Y version
......@@ -38,8 +38,10 @@ release = '0.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.graphviz",
"sphinxcontrib.plantuml",
]
# Add any paths that contain templates here, relative to this directory.
......@@ -59,7 +61,7 @@ master_doc = 'index'
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
......@@ -86,7 +88,6 @@ html_theme_options = {
'logo': 'logo-ps.svg',
'description': "&nbsp;", # ugly hack to get some white space below the logo
'fixed_sidebar': True,
'extra_nav_links': {"Fork me on GitHub": "https://github.com/PrivateStorageio/PrivateStorageio"},
}
# Add any paths that contain custom static files (such as style sheets) here,
......
Developer documentation
=======================
Building
--------
The build system uses `Nix`_ which must be installed before anything can be built.
Start by setting up the development/operations environment::
$ nix-shell
Testing
-------
The test system uses `Nix`_ which must be installed before any tests can be run.
Unit tests are run using this command::
$ nix-build --attr unit-tests
Unit tests are also run on CI.
The system tests are run using this command::
$ nix-build --attr system-tests
The build requires > 10 GB of disk space,
and the VMs might time out on slow or busy machines.
If you run into timeouts,
try `raising the number of retries <https://whetstone.private.storage/privatestorage/PrivateStorageio/-/blob/e8233d2/nixos/modules/tests/run-introducer.py#L55-62>`_.
It is also possible to go through the testing script interactively - useful for debugging::
$ nix-build --attr system-tests.private-storage.driver
This will give you a result symlink in the current directory.
Inside that is bin/nixos-test-driver which gives you a kind of REPL for interacting with the VMs.
The kind of `Python in this testScript <https://whetstone.private.storage/privatestorage/PrivateStorageio/-/blob/78881a3/nixos/modules/tests/private-storage.nix#L180>`_ is what you can enter into this REPL.
Consult the `official documentation on NixOS Tests <https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests>`_ for more information.
Updating Pins
-------------
Nixpkgs
```````
To update the version of NixOS we deploy with, run::
nix-shell --run 'update-nixpkgs'
That will update ``nixpkgs.json`` to the latest release on the nixos release channel.
To update the channel, the script will need to be updated,
along with the filenames that have the channel in them.
To create a text summary of what an update changes - to put in Merge Requests, for example - run::
nix-build -A morph -o result-before
update-nixpkgs
nix-build -A morph -o result-after
nix-shell -p nixUnstable
nix --extra-experimental-features nix-command store diff-closures ./result-before/ ./result-after/
Gitlab Repositories
```````````````````
To update the version of packages we import from gitlab, run::
nix-shell --command 'update-gitlab-repo nixos/pkgs/<package>/repo.json'
That will update the package to point at the latest version of the project.
The command uses branch and repository owner specified in the ``repo.json`` file,
but you can override them by passing the ``--branch`` or ``--owner`` arguments to the command.
A specific revision can also be pinned, by passing ``-rev``.
Interactions
------------
Storage-Time Purchase (ie Payment)
``````````````````````````````````
.. uml::
actor User as User
participant GridSync
participant ZKAPAuthorizer
database ZKAPAuthzDB as "ZKAPAuthorizer"
participant Browser
participant PaymentServer as "Payment Server"
database PaymentServerDB as "Payment Server"
participant WebServer as "Web Server"
participant Stripe
User -> GridSync : buy storage-time
activate User
GridSync -> GridSync : generate voucher
GridSync -> ZKAPAuthorizer : redeem voucher
activate ZKAPAuthorizer
ZKAPAuthorizer -> ZKAPAuthzDB : store voucher
ZKAPAuthorizer -> GridSync : acknowledge
GridSync -> Browser : open payment page
loop until redeemed
GridSync -> ZKAPAuthorizer : query voucher state
ZKAPAuthorizer -> GridSync : not paid
end
Browser -> WebServer : request payment form
WebServer -> Browser : payment form
Browser -> User : Payment form displayed
activate User
User -> Browser : Submit payment details
Browser -> Stripe : Submit payment details
alt payment details accepted
Stripe -> Browser : details okay, return card token
Browser -> PaymentServer : create charge using card token
PaymentServer -> Stripe : charge card using token
note left: the user has now paid for the service
Stripe -> PaymentServer : acknowledge
PaymentServer -> PaymentServerDB : store voucher paid state
else payment details rejected
Stripe -> Browser : payment failure
end
Browser -> User : payment processing results displayed
deactivate User
group repeat for each redemption group
ZKAPAuthorizer -> ZKAPAuthzDB : generate and store random tokens
ZKAPAuthorizer -> PaymentServer : redeem voucher with blinded tokens
PaymentServer -> ZKAPAuthorizer : return signatures for blinded tokens
ZKAPAuthorizer -> ZKAPAuthzDB : store unblinded signatures for tokens
note right: the user has now been authorized to use the service
end
deactivate ZKAPAuthorizer
loop until redeemed
GridSync -> ZKAPAuthorizer : query voucher state
ZKAPAuthorizer -> GridSync : fully redeemed
end
GridSync -> User : storage-time available displayed
deactivate User
Storage-Time Spending (ie Use)
``````````````````````````````
.. uml::
participant MagicFolder
participant TahoeLAFS as "Tahoe-LAFS"
participant ZKAPAuthorizer
database ZKAPAuthzDB as "ZKAPAuthorizer"
participant StorageNode as "Storage Node"
participant SpendingService as "Spending Service"
[-> MagicFolder: upload triggered
activate MagicFolder
MagicFolder -> TahoeLAFS : store some data
activate TahoeLAFS
TahoeLAFS -> ZKAPAuthorizer : store some data
activate ZKAPAuthorizer
loop until tokens accepted
ZKAPAuthorizer <- ZKAPAuthzDB : load some tokens
ZKAPAuthorizer -> StorageNode : store some data using these tokens
StorageNode -> SpendingService : spend these tokens
alt spent tokens
SpendingService -> StorageNode: already spent, rejected
StorageNode -> ZKAPAuthorizer: already spent, rejected
else fresh tokens
SpendingService -> StorageNode: accepted
end
end
StorageNode -> ZKAPAuthorizer: data stored
deactivate ZKAPAuthorizer
ZKAPAuthorizer -> ZKAPAuthzDB: discard spent tokens
ZKAPAuthorizer -> TahoeLAFS: data stored
deactivate TahoeLAFS
TahoeLAFS -> MagicFolder: data stored
deactivate MagicFolder
.. include::
../../morph/grid/local/README.rst
.. _Nix: https://nixos.org/nix
System Designs
--------------
.. toctree::
:maxdepth: 2
System Design Template <template>