diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 236b47d7160f841abe9e56c59f8d3604d4bf2f5b..89fb733f772f3e557fb0c224b40f13eb5a430e46 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -35,7 +35,7 @@ cabal-build-and-test:
     # shortcoming of this approach is that the cache will grow without bounds
     # as our dependencies change.  It would be nice if there were a way to
     # trim stale dependencies from it periodically.
-    key: "v0-cabal"
+    key: "v1-cabal"
 
     # Even if a build fails (eg because of a test suite failure or because the
     # job timed out because building things took too long), there will be
@@ -56,7 +56,7 @@ cabal-build-and-test:
       # This is only done in CI configuration because non-CI users probably
       # want their local cabal configuration left alone.
       cat >cabal.project.local <<EOF
-      store-dir: $CI_PROJECT_DIR/.cabal
+      store-dir: $CI_PROJECT_DIR/.cabal/store
       remote-repo-cache: $CI_PROJECT_DIR/.cabal/packages
       EOF
       nix run .#cabal-test
diff --git a/app/Main.hs b/app/Main.hs
index 80adaaa9cc056d1d0c5b3a06a22c48bd4ff2c3fe..2b87ca658b7f582f4d56e79fe8ba82b372be32ce 100644
--- a/app/Main.hs
+++ b/app/Main.hs
@@ -1,8 +1,5 @@
 module Main where
 
-import qualified MyLib (someFunc)
-
 main :: IO ()
 main = do
     putStrLn "Hello, Haskell!"
-    MyLib.someFunc
diff --git a/cabal.project b/cabal.project
new file mode 100644
index 0000000000000000000000000000000000000000..c216d6e9ce25190cc1b5794854ce6eae5b300174
--- /dev/null
+++ b/cabal.project
@@ -0,0 +1,13 @@
+packages: .
+          -- These aren't released on Hackage yet so we have to grab them
+          -- straight from the VCS host.  We should probably do some
+          -- releases soon.
+          https://whetstone.private.storage/privatestorage/tahoe-chk/-/archive/0.1.0.0/tahoe-chk-0.1.0.0.tar.gz
+          https://whetstone.private.storage/privatestorage/tahoe-great-black-swamp/-/archive/0.1.0.1/tahoe-great-black-swamp-0.1.0.1.tar.gz
+
+package zlib
+        -- Turn on discovery of the underlying zlib using pkg-config.  This
+        -- fixes build failures when the underlying zlib is not in the
+        -- traditional location but is discoverable with pkg-config.  It might
+        -- break non-pkg-config platforms.
+        flags: +pkg-config
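+
+        -- A quick sanity check (an assumption about the environment, not
+        -- part of the build): if `pkg-config --modversion zlib` prints a
+        -- version in your shell, cabal should be able to discover zlib
+        -- through this flag.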
diff --git a/flake.lock b/flake.lock
index 6c3fe1bf377e3c0ad07f8a1f529aedca608f73a9..b268d888768c5de6920c637dcf97bfc0f341961f 100644
--- a/flake.lock
+++ b/flake.lock
@@ -16,6 +16,38 @@
         "type": "github"
       }
     },
+    "flake-compat_2": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1673956053,
+        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
+    "flake-compat_3": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1673956053,
+        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
     "flake-utils": {
       "locked": {
         "lastModified": 1667395993,
@@ -46,6 +78,66 @@
         "type": "github"
       }
     },
+    "flake-utils_3": {
+      "locked": {
+        "lastModified": 1667395993,
+        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "flake-utils_4": {
+      "locked": {
+        "lastModified": 1676283394,
+        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "flake-utils_5": {
+      "locked": {
+        "lastModified": 1676283394,
+        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "flake-utils_6": {
+      "locked": {
+        "lastModified": 1676283394,
+        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
     "gitignore": {
       "inputs": {
         "nixpkgs": [
@@ -68,12 +160,100 @@
         "type": "github"
       }
     },
+    "gitignore_2": {
+      "inputs": {
+        "nixpkgs": [
+          "tahoe-chk",
+          "hs-flake-utils",
+          "pre-commit-hooks",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1660459072,
+        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "type": "github"
+      }
+    },
+    "gitignore_3": {
+      "inputs": {
+        "nixpkgs": [
+          "tahoe-great-black-swamp",
+          "hs-flake-utils",
+          "pre-commit-hooks",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1660459072,
+        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "type": "github"
+      }
+    },
     "hs-flake-utils": {
       "inputs": {
         "flake-utils": "flake-utils_2",
         "nixpkgs": "nixpkgs",
         "pre-commit-hooks": "pre-commit-hooks"
       },
+      "locked": {
+        "lastModified": 1681762240,
+        "narHash": "sha256-+PLx9xHBvV70dA7Gy/+YTH1w3PcSOrGV0z0rGxts8jU=",
+        "ref": "main",
+        "rev": "a51e591b7fdf8881ac0237452691df7b1aceecd3",
+        "revCount": 10,
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      },
+      "original": {
+        "ref": "main",
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      }
+    },
+    "hs-flake-utils_2": {
+      "inputs": {
+        "flake-utils": "flake-utils_4",
+        "nixpkgs": "nixpkgs_2",
+        "pre-commit-hooks": "pre-commit-hooks_2"
+      },
+      "locked": {
+        "lastModified": 1677773826,
+        "narHash": "sha256-xJmOtHugr4k2zNhP/AF6JdIUnIEyM+TEspLn2n5kloc=",
+        "ref": "main",
+        "rev": "d3a83fdd9563546ca41771186427638e685a2e2b",
+        "revCount": 9,
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      },
+      "original": {
+        "ref": "main",
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      }
+    },
+    "hs-flake-utils_3": {
+      "inputs": {
+        "flake-utils": "flake-utils_6",
+        "nixpkgs": "nixpkgs_3",
+        "pre-commit-hooks": "pre-commit-hooks_3"
+      },
       "locked": {
         "lastModified": 1677773826,
         "narHash": "sha256-xJmOtHugr4k2zNhP/AF6JdIUnIEyM+TEspLn2n5kloc=",
@@ -121,6 +301,70 @@
         "type": "github"
       }
     },
+    "nixpkgs-stable_2": {
+      "locked": {
+        "lastModified": 1673800717,
+        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs-stable_3": {
+      "locked": {
+        "lastModified": 1673800717,
+        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_2": {
+      "locked": {
+        "lastModified": 1677624842,
+        "narHash": "sha256-4DF9DbDuK4/+KYx0L6XcPBeDHUFVCtzok2fWtwXtb5w=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "d70f5cd5c3bef45f7f52698f39e7cc7a89daa7f0",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_3": {
+      "locked": {
+        "lastModified": 1677624842,
+        "narHash": "sha256-4DF9DbDuK4/+KYx0L6XcPBeDHUFVCtzok2fWtwXtb5w=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "d70f5cd5c3bef45f7f52698f39e7cc7a89daa7f0",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "pre-commit-hooks": {
       "inputs": {
         "flake-compat": "flake-compat",
@@ -149,14 +393,124 @@
         "type": "github"
       }
     },
+    "pre-commit-hooks_2": {
+      "inputs": {
+        "flake-compat": "flake-compat_2",
+        "flake-utils": [
+          "tahoe-chk",
+          "hs-flake-utils",
+          "flake-utils"
+        ],
+        "gitignore": "gitignore_2",
+        "nixpkgs": [
+          "tahoe-chk",
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "nixpkgs-stable": "nixpkgs-stable_2"
+      },
+      "locked": {
+        "lastModified": 1677722096,
+        "narHash": "sha256-7mjVMvCs9InnrRybBfr5ohqcOz+pyEX8m22C1XsDilg=",
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "rev": "61a3511668891c68ebd19d40122150b98dc2fe3b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "type": "github"
+      }
+    },
+    "pre-commit-hooks_3": {
+      "inputs": {
+        "flake-compat": "flake-compat_3",
+        "flake-utils": [
+          "tahoe-great-black-swamp",
+          "hs-flake-utils",
+          "flake-utils"
+        ],
+        "gitignore": "gitignore_3",
+        "nixpkgs": [
+          "tahoe-great-black-swamp",
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "nixpkgs-stable": "nixpkgs-stable_3"
+      },
+      "locked": {
+        "lastModified": 1677722096,
+        "narHash": "sha256-7mjVMvCs9InnrRybBfr5ohqcOz+pyEX8m22C1XsDilg=",
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "rev": "61a3511668891c68ebd19d40122150b98dc2fe3b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "type": "github"
+      }
+    },
     "root": {
       "inputs": {
         "flake-utils": "flake-utils",
         "hs-flake-utils": "hs-flake-utils",
+        "nixpkgs": [
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "tahoe-chk": "tahoe-chk",
+        "tahoe-great-black-swamp": "tahoe-great-black-swamp"
+      }
+    },
+    "tahoe-chk": {
+      "inputs": {
+        "flake-utils": "flake-utils_3",
+        "hs-flake-utils": "hs-flake-utils_2",
+        "nixpkgs": [
+          "hs-flake-utils",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1681998540,
+        "narHash": "sha256-gtlHidh8+qMUdnTzlxLIGlCMavq4/TFQDmope33Cd2w=",
+        "ref": "refs/tags/0.1.0.0",
+        "rev": "799bab0c2b8b5f9cbd98e7dd7d6a77285c87f16b",
+        "revCount": 340,
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk"
+      },
+      "original": {
+        "ref": "refs/tags/0.1.0.0",
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk"
+      }
+    },
+    "tahoe-great-black-swamp": {
+      "inputs": {
+        "flake-utils": "flake-utils_5",
+        "hs-flake-utils": "hs-flake-utils_3",
         "nixpkgs": [
           "hs-flake-utils",
           "nixpkgs"
         ]
+      },
+      "locked": {
+        "lastModified": 1682446800,
+        "narHash": "sha256-Vxl4dLpoRp2svWTx0w74m7PeVPMlkSu/XOZAHccOUDs=",
+        "ref": "refs/tags/0.1.0.1",
+        "rev": "b81cc3fcdb0107d369a636fdc5a17cf174dff2ea",
+        "revCount": 143,
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp"
+      },
+      "original": {
+        "ref": "refs/tags/0.1.0.1",
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp"
       }
     }
   },
diff --git a/flake.nix b/flake.nix
index 3b0f855eaf5d60d691d08acae679b279af7a7463..8c70ef15153e6a444c51dee6bd182192295cb18f 100644
--- a/flake.nix
+++ b/flake.nix
@@ -6,6 +6,16 @@
     nixpkgs.follows = "hs-flake-utils/nixpkgs";
     flake-utils.url = github:numtide/flake-utils;
     hs-flake-utils.url = "git+https://whetstone.private.storage/jcalderone/hs-flake-utils.git?ref=main";
+
+    tahoe-chk = {
+      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-chk?ref=refs/tags/0.1.0.0";
+      inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+    };
+
+    tahoe-great-black-swamp = {
+      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp?ref=refs/tags/0.1.0.1";
+      inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+    };
   };
 
   outputs = {
@@ -13,6 +23,8 @@
     nixpkgs,
     flake-utils,
     hs-flake-utils,
+    tahoe-chk,
+    tahoe-great-black-swamp,
   }: let
     ulib = flake-utils.lib;
     ghcVersion = "ghc8107";
@@ -27,10 +39,26 @@
         src = ./.;
         compilerVersion = ghcVersion;
         packageName = "gbs-downloader";
+        hsPkgsOverrides = import ./nix/haskell-packages.nix {
+          tahoe-chk = tahoe-chk.outputs.packages.${system}.default;
+          tahoe-great-black-swamp = tahoe-great-black-swamp.outputs.packages.${system}.default;
+          haskellLib = pkgs.haskell.lib;
+        };
       };
     in {
       checks = hslib.checks {};
-      devShells = hslib.devShells {};
+      devShells = hslib.devShells {
+        extraBuildInputs = pkgs:
+          with pkgs; [
+            # We configure cabal to use zlib:pkg-config, so we had better
+            # supply pkg-config in the dev shell or `cabal ...` builds will
+            # surely fail.
+            pkg-config
+
+            # And also that pesky zlib dependency.
+            zlib
+          ];
+      };
       packages = hslib.packages {};
       apps.hlint = hslib.apps.hlint {};
 
@@ -41,12 +69,22 @@
         program = "${
           pkgs.writeShellApplication {
             name = "cabal-build-and-test";
-            runtimeInputs = with pkgs; [pkg-config haskell.compiler.${ghcVersion} cabal-install];
+            # Only put packages whose programs need to be on PATH here,
+            # because that's all that runtimeInputs buys us.  Packages with
+            # different requirements need to be handled differently.
+            runtimeInputs = with pkgs; [
+              pkg-config
+              haskell.compiler.${ghcVersion}
+              cabal-install
+            ];
 
             text = ''
+              # Here we make zlib discoverable by pkg-config so cabal can
+              # find its headers and libraries.
+              export PKG_CONFIG_PATH=${pkgs.lib.makeSearchPath "lib/pkgconfig" [pkgs.zlib.dev]}
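+              # (Not required, but if discovery ever breaks, checking that
+              # `pkg-config --exists zlib` succeeds in this environment is a
+              # quick way to confirm the search path above works.)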
+
               cabal update hackage.haskell.org
-              cabal build --enable-tests
-              eval "$(cabal list-bin gbs-downloader-tests)"
+              cabal run gbs-downloader-test
             '';
           }
         }/bin/cabal-build-and-test";
diff --git a/gbs-downloader.cabal b/gbs-downloader.cabal
index 9f3f1429d93189a773a1b4213897c146133b7819..8d377001b6a0f7437ef008f13a615b2108909641 100644
--- a/gbs-downloader.cabal
+++ b/gbs-downloader.cabal
@@ -58,14 +58,24 @@ extra-doc-files: CHANGELOG.md
 -- extra-source-files:
 
 common warnings
-  ghc-options: -Wall
+  ghc-options: -Wall -Werror
+
+common language
+  -- LANGUAGE extensions used by modules in all targets.
+  default-extensions:
+    NamedFieldPuns
+    OverloadedStrings
+    RecordWildCards
+    TupleSections
 
 library
   -- Import common warning flags.
-  import:           warnings
+  import:
+    warnings
+    , language
 
   -- Modules exported by the library.
-  exposed-modules:  MyLib
+  exposed-modules:  Tahoe.Download
 
   -- Modules included in this library but not exported.
   -- other-modules:
@@ -74,7 +84,14 @@ library
   -- other-extensions:
 
   -- Other library packages from which modules are imported.
-  build-depends:    base ^>=4.14.3.0
+  build-depends:
+    , base                     ^>=4.14.3.0
+    , binary
+    , bytestring
+    , containers
+    , tahoe-chk
+    , tahoe-great-black-swamp
+    , text
 
   -- Directories containing source files.
   hs-source-dirs:   src
@@ -82,9 +99,11 @@ library
   -- Base language which the package is written in.
   default-language: Haskell2010
 
-executable gbs-downloader
+executable gbs-download
   -- Import common warning flags.
-  import:           warnings
+  import:
+    warnings
+    , language
 
   -- .hs or .lhs file containing the Main module.
   main-is:          Main.hs
@@ -108,16 +127,15 @@ executable gbs-downloader
 
 test-suite gbs-downloader-test
   -- Import common warning flags.
-  import:           warnings
+  import:
+    warnings
+    , language
 
   -- Base language which the package is written in.
   default-language: Haskell2010
 
   -- Modules included in this executable, other than Main.
-  -- other-modules:
-
-  -- LANGUAGE extensions used by modules in this package.
-  -- other-extensions:
+  other-modules:    Generators
 
   -- The interface type and version of the test suite.
   type:             exitcode-stdio-1.0
@@ -126,9 +144,20 @@ test-suite gbs-downloader-test
   hs-source-dirs:   test
 
   -- The entrypoint to the test suite.
-  main-is:          Main.hs
+  main-is:          Spec.hs
 
   -- Test dependencies.
   build-depends:
     , base            ^>=4.14.3.0
+    , base32
+    , binary
+    , bytestring
+    , containers
+    , crypto-api
     , gbs-downloader
+    , hedgehog
+    , tahoe-chk
+    , tasty
+    , tasty-hedgehog
+    , tasty-hunit
+    , text
diff --git a/nix/haskell-packages.nix b/nix/haskell-packages.nix
new file mode 100644
index 0000000000000000000000000000000000000000..175fc2091a3906e508c6b4d7e1146a01122494c1
--- /dev/null
+++ b/nix/haskell-packages.nix
@@ -0,0 +1,12 @@
+{
+  haskellLib,
+  tahoe-chk,
+  tahoe-great-black-swamp,
+}: hfinal: hprev: {
+  inherit tahoe-chk;
+  inherit tahoe-great-black-swamp;
+
+  # language-ecmascript is a dependency of a tahoe-great-black-swamp
+  # executable that we don't use, but it is marked broken in nixpkgs.  Flip
+  # the broken bit so we can get a build.
+  language-ecmascript = haskellLib.unmarkBroken hprev.language-ecmascript;
+}
diff --git a/src/MyLib.hs b/src/MyLib.hs
deleted file mode 100644
index e657c4403f66f966da13d2027bf595d9673387f6..0000000000000000000000000000000000000000
--- a/src/MyLib.hs
+++ /dev/null
@@ -1,4 +0,0 @@
-module MyLib (someFunc) where
-
-someFunc :: IO ()
-someFunc = putStrLn "someFunc"
diff --git a/src/Tahoe/Download.hs b/src/Tahoe/Download.hs
new file mode 100644
index 0000000000000000000000000000000000000000..35d87188f5fb56222b3472f4d82df21e24d8d83f
--- /dev/null
+++ b/src/Tahoe/Download.hs
@@ -0,0 +1,154 @@
+{- | A high-level interface to downloading share data as bytes from storage
+ servers.
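+
+ A hypothetical usage sketch (the announcement map, read capability, and
+ server lookup function are stand-ins for values the caller must supply):
+
+ > result <- download announcements readCap lookupServer
+ > case result of
+ >     Left err -> print err
+ >     Right plaintext -> LB.putStr plaintext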
+-}
+module Tahoe.Download (LookupServer, DownloadError (..), discoverShares, download) where
+
+import Data.Binary (decodeOrFail)
+import qualified Data.ByteString.Lazy as LB
+import Data.Either (isRight, rights)
+import Data.List (foldl')
+import qualified Data.Map.Strict as Map
+import qualified Data.Set as Set
+import qualified Tahoe.CHK
+import Tahoe.CHK.Capability (Reader (..), Verifier (..))
+import qualified Tahoe.CHK.Encrypt
+import Tahoe.CHK.Server (StorageServer (..), StorageServerAnnouncement (..), StorageServerID, URL)
+import Tahoe.CHK.Types (ShareNum, StorageIndex)
+
+{- | An unrecoverable problem arose while attempting to download and/or read
+ some application data.
+-}
+data DownloadError
+    = -- | The configuration included no candidate servers from which to download.
+      NoConfiguredServers
+    | -- | Across all of the configured servers, none were actually connectable.
+      NoReachableServers
+    | -- | Across all of the configured servers, fewer than the required
+      -- number of shares were found.  XXX Could split this into the
+      -- different cases: did not locate enough shares, did not download
+      -- enough shares, did not verify enough shares.
+      NotEnoughShares
+        { notEnoughSharesNeeded :: Int
+        , notEnoughSharesFound :: Int
+        }
+    | -- | Across all of the shares that we could download, fewer than the
+      -- required number could actually be decoded.
+      NotEnoughDecodedShares
+        { notEnoughDecodedSharesNeeded :: Int
+        , notEnoughDecodedSharesFound :: Int
+        }
+    | -- | Enough syntactically valid shares were recovered but they could not
+      -- be interpreted.
+      ShareDecodingFailed
+    | -- | An attempt was made to download a share but no servers were given for
+      -- the download.
+      NoServers
+    deriving (Eq, Ord, Show)
+
+{- | A problem arose while attempting to discover the shares held on a
+ particular server.
+-}
+data DiscoverError
+    = -- | An announcement did not include a location for a connection attempt.
+      StorageServerLocationUnknown
+    | StorageServerUnreachable
+    deriving (Eq, Ord, Show)
+
+-- TODO The result might need to be in IO in case the URL indicates a
+-- Tor-based route to the server.  In this case we might need to launch a Tor
+-- daemon or connect to a running Tor daemon or at least set up a new Tor
+-- circuit.  All of which require I/O.  But we can always refactor later!
+type LookupServer = URL -> Maybe StorageServer
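+
+-- A minimal sketch of a 'LookupServer' (illustrative only; the name 'fixed'
+-- is made up): recognize a single known URL and hand back an
+-- already-constructed server, much as the test suite does.
+--
+-- > fixed :: StorageServer -> LookupServer
+-- > fixed server url = if url == "somewhere" then Just server else Nothing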
+
+{- | Recover the application data associated with a given capability from the
+ given servers, if possible.
+-}
+download ::
+    -- | Information about the servers from which to consider downloading shares
+    -- representing the application data.
+    Map.Map StorageServerID StorageServerAnnouncement ->
+    -- | The read capability for the application data.
+    Reader ->
+    -- | Get functions for interacting with a server given its URL.
+    LookupServer ->
+    -- | Either a description of how the recovery failed or the recovered
+    -- application data.
+    IO (Either DownloadError LB.ByteString)
+download servers cap@Reader{readKey, verifier = Verifier{..}} lookupServer =
+    case Map.toList servers of
+        [] -> pure . Left $ NoConfiguredServers
+        serverList -> do
+            -- Ask each server for all shares it has.
+            discovered <- rights <$> mapM (discoverShares lookupServer storageIndex) serverList :: IO [(StorageServer, Set.Set ShareNum)]
+            if null discovered
+                then pure $ Left NoReachableServers
+                else
+                    if (fromIntegral required >) . countDistinctShares $ discovered
+                        then pure $ Left NotEnoughShares{notEnoughSharesNeeded = fromIntegral required, notEnoughSharesFound = countDistinctShares discovered}
+                        else do
+                            -- XXX Up to here is probably "locateShares".  Now we move on to "fetchShares".
+                            let sharemap = makeShareMap discovered
+                            -- XXX note shares can contain failures
+                            shares <- mapM (uncurry $ downloadShare storageIndex) (Map.toList sharemap)
+                            -- XXX That was brief.  Probably missing some complexity.  Now we move on to "decodeShares".
+                            -- Filter down to shares we actually got.  The
+                            -- partial pattern matches below are safe because
+                            -- each list has just been filtered to Rights.
+                            let someShares = filter (isRight . snd) shares
+                                fewerShares = filter (isRight . snd) $ (\(sharenum, Right bs) -> (sharenum, decodeOrFail bs)) <$> someShares
+                                onlyDecoded = (\(sharenum, Right (_, _, share)) -> (fromIntegral sharenum, share)) <$> fewerShares
+                            if length onlyDecoded < fromIntegral required
+                                then pure $ Left NotEnoughDecodedShares{notEnoughDecodedSharesNeeded = fromIntegral required, notEnoughDecodedSharesFound = length onlyDecoded}
+                                else do
+                                    ciphertext <- Tahoe.CHK.decode cap onlyDecoded
+                                    case ciphertext of
+                                        Nothing -> pure $ Left ShareDecodingFailed
+                                        Just ct ->
+                                            pure . Right $ Tahoe.CHK.Encrypt.decrypt readKey ct
+
+{- | Count the total number of distinct shares reported across all of the
+ servers we asked.
+-}
+countDistinctShares :: Ord b => [(a, Set.Set b)] -> Int
+countDistinctShares = Set.size . foldl' Set.union mempty . map snd
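+
+-- For example (a sketch; the first tuple element is ignored):
+--
+-- >>> countDistinctShares [("a", Set.fromList [0, 1]), ("b", Set.fromList [1, 2])]
+-- 3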
+
+{- | Ask one server which shares it has related to the storage index in
+ question.
+-}
+discoverShares :: LookupServer -> StorageIndex -> (StorageServerID, StorageServerAnnouncement) -> IO (Either DiscoverError (StorageServer, Set.Set ShareNum))
+discoverShares lookupServer storageIndex (_sid, sann) = do
+    case storageServerAnnouncementFURL sann of
+        Nothing -> pure $ Left StorageServerLocationUnknown
+        Just url -> do
+            let server = lookupServer url
+            case server of
+                Nothing -> pure $ Left StorageServerUnreachable
+                Just ss@StorageServer{storageServerGetBuckets} ->
+                    Right . (ss,) <$> storageServerGetBuckets storageIndex
+
+{- | Invert the mapping implied by the list of pairs so that the servers
+ that claim to hold a certain share can easily be retrieved.
+-}
+makeShareMap :: Ord k => [(v, Set.Set k)] -> Map.Map k [v]
+makeShareMap locations =
+    foldl' (Map.unionWith (<>)) mempty ((\(k, v) -> Map.fromSet (const [k]) v) <$> locations)
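+
+-- For example (a sketch): share 1 is claimed by both servers, so it maps to
+-- both of them.
+--
+-- >>> makeShareMap [("srv1", Set.fromList [0, 1]), ("srv2", Set.fromList [1])]
+-- fromList [(0,["srv1"]),(1,["srv1","srv2"])]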
+
+-- | Download the bytes of a share from one (or more!) of the given servers.
+downloadShare ::
+    -- | The storage index of the share to download.
+    StorageIndex ->
+    -- | The number of the share to download.
+    ShareNum ->
+    -- | The servers which we have a reasonable belief could hold a copy of the
+    -- share.  It is common for there to be only one server holding each share
+    -- but nothing *prevents* multiple servers from having one.  In this case we
+    -- could download the share from both of them, perhaps.
+    [StorageServer] ->
+    -- | The bytes of the share or some error that was encountered during
+    -- download.
+    IO (ShareNum, Either DownloadError LB.ByteString)
+downloadShare _ shareNum [] = pure (shareNum, Left NoServers)
+downloadShare storageIndex shareNum (s : _) = do
+    -- TODO: There might be more servers.  We could try them if this fails.
+    -- On the other hand, we might get bytes but we don't verify them here so
+    -- we might also need retry logic up a level or two from here.
+    shareBytes <- storageServerRead s storageIndex shareNum
+    pure (shareNum, Right $ LB.fromStrict shareBytes)
diff --git a/test/Generators.hs b/test/Generators.hs
new file mode 100644
index 0000000000000000000000000000000000000000..08ba46e3fcbefd45e977c8d35e240821439aab1b
--- /dev/null
+++ b/test/Generators.hs
@@ -0,0 +1,22 @@
+module Generators where
+
+import Data.Int (Int64)
+import Hedgehog (MonadGen)
+import qualified Hedgehog.Gen as Gen
+import qualified Hedgehog.Range as Range
+import Tahoe.CHK.Types (Parameters (..))
+
+-- | The maximum value an Int64 can represent.
+maxInt64 :: Integer
+maxInt64 = fromIntegral (maxBound :: Int64)
+
+-- | Generate Parameters values for which all field invariants hold:
+-- 1 <= paramRequiredShares < paramTotalShares <= 256, and a segment size of
+-- at least one byte.
+genParameters :: MonadGen m => m Parameters
+genParameters = do
+    paramSegmentSize <- Gen.integral (Range.exponential 1 maxInt64)
+    paramTotalShares <- Gen.integral (Range.linear 2 256)
+    paramRequiredShares <- Gen.integral (Range.linear 1 (paramTotalShares - 1))
+    -- XXX We're going to get rid of "Happy" from this type.  For now it's
+    -- easier not to let this value vary and it doesn't hurt anything.
+    let paramHappyShares = 1
+    pure $ Parameters{paramSegmentSize, paramTotalShares, paramHappyShares, paramRequiredShares}
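+
+-- A quick way to eyeball generated values in a REPL (a sketch; assumes
+-- Parameters has a Show instance):
+--
+-- >>> Gen.sample genParameters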
diff --git a/test/Main.hs b/test/Main.hs
deleted file mode 100644
index 3e2059e31f5127521b263b051fc7247772685e1a..0000000000000000000000000000000000000000
--- a/test/Main.hs
+++ /dev/null
@@ -1,4 +0,0 @@
-module Main (main) where
-
-main :: IO ()
-main = putStrLn "Test suite not yet implemented."
diff --git a/test/Spec.hs b/test/Spec.hs
new file mode 100644
index 0000000000000000000000000000000000000000..f2547908dc60bdaab1ccc29623c844c2f6a05fe1
--- /dev/null
+++ b/test/Spec.hs
@@ -0,0 +1,271 @@
+module Main where
+
+import Control.Exception (Exception, throwIO)
+import Control.Monad (replicateM, zipWithM_)
+import Control.Monad.IO.Class (liftIO)
+import Crypto.Classes (buildKey)
+import qualified Data.Binary as Binary
+import qualified Data.ByteString as B
+import Data.ByteString.Base32 (encodeBase32Unpadded)
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Map.Strict as Map
+import qualified Data.Set as Set
+import qualified Data.Text as T
+import Data.Text.Encoding (encodeUtf8)
+import Data.Word (Word16)
+import Generators (genParameters)
+import Hedgehog (MonadGen, diff, forAll, property)
+import qualified Hedgehog.Gen as Gen
+import qualified Hedgehog.Range as Range
+import System.IO (hSetEncoding, stderr, stdout, utf8)
+import qualified Tahoe.CHK
+import Tahoe.CHK.Capability (Reader (..), Verifier (..))
+import qualified Tahoe.CHK.Encrypt
+import Tahoe.CHK.Server (StorageServer (..), StorageServerAnnouncement (..), StorageServerID)
+import Tahoe.CHK.Types (Parameters (..))
+import Tahoe.CHK.Upload (getConvergentKey)
+import Tahoe.Download (DownloadError (..), LookupServer, download)
+import Tahoe.Server (memoryStorageServer)
+import Test.Tasty (TestTree, defaultMain, testGroup)
+import Test.Tasty.HUnit (assertEqual, testCase)
+import Test.Tasty.Hedgehog (testProperty)
+
+data PlacementError = RanOutOfPlacementInfo | RanOutOfServers deriving (Eq, Show)
+instance Exception PlacementError
+
+tests :: TestTree
+tests =
+    testGroup
+        "All tests"
+        [ testCase "no configured servers" $ do
+            -- If there are no servers then we can't possibly get enough
+            -- shares to recover the application data.
+            result <- liftIO $ download mempty (trivialCap 1 1) noServers
+            assertEqual
+                "download should fail with no servers"
+                (Left NoConfiguredServers)
+                result
+        , testCase "no reachable servers" $ do
+            -- If we can't contact any configured server then we can't
+            -- possibly get enough shares to recover the application data.
+            let anns =
+                    Map.fromList
+                        [
+                            ( "v0-abc123"
+                            , StorageServerAnnouncement
+                                { storageServerAnnouncementFURL = Nothing
+                                , storageServerAnnouncementNick = Just "unreachable"
+                                , storageServerAnnouncementPermutationSeed = Nothing
+                                }
+                            )
+                        ]
+
+            result <- liftIO $ download anns (trivialCap 1 1) noServers
+            assertEqual
+                "download should fail with no reachable servers"
+                (Left NoReachableServers)
+                result
+        , testCase "not enough shares" $ do
+            -- If we can't recover enough shares from the configured servers
+            -- then we can't possibly get enough shares to recover the
+            -- application data.
+            let anns =
+                    Map.fromList
+                        [
+                            ( "v0-abc123"
+                            , StorageServerAnnouncement
+                                { storageServerAnnouncementFURL = Just "somewhere"
+                                , storageServerAnnouncementNick = Just "abc123"
+                                , storageServerAnnouncementPermutationSeed = Nothing
+                                }
+                            )
+                        ]
+                cap = trivialCap 3 3
+
+            -- Two shares exist.
+            server <- memoryStorageServer
+            storageServerWrite server (storageIndex . verifier $ cap) 0 0 "Hello world"
+            storageServerWrite server (storageIndex . verifier $ cap) 1 0 "Hello world"
+
+            -- Make the server reachable.
+            let openServer furl =
+                    if furl == "somewhere"
+                        then pure server
+                        else Nothing
+
+            -- Try to download the cap which requires three shares to reconstruct.
+            result <- liftIO $ download anns cap openServer
+            assertEqual
+                "download should fail with not enough shares"
+                (Left NotEnoughShares{notEnoughSharesNeeded = 3, notEnoughSharesFound = 2})
+                result
+        , testCase "not enough distinct shares" $ do
+            -- If we can't recover enough *distinct* shares from the
+            -- configured servers then we can't possibly get enough shares to
+            -- recover the application data.  Duplicate shares do us no good.
+            let anns =
+                    Map.fromList
+                        [
+                            ( "v0-abc123"
+                            , StorageServerAnnouncement
+                                { storageServerAnnouncementFURL = Just "somewhere"
+                                , storageServerAnnouncementNick = Just "abc123"
+                                , storageServerAnnouncementPermutationSeed = Nothing
+                                }
+                            )
+                        ,
+                            ( "v0-abc456"
+                            , StorageServerAnnouncement
+                                { storageServerAnnouncementFURL = Just "elsewhere"
+                                , storageServerAnnouncementNick = Just "abc123"
+                                , storageServerAnnouncementPermutationSeed = Nothing
+                                }
+                            )
+                        ]
+                cap = trivialCap 3 3
+
+            -- Three shares exist
+            somewhere <- memoryStorageServer
+            let idx = storageIndex . verifier $ cap
+                offset = 0
+            storageServerWrite somewhere idx 0 offset "Hello world"
+            storageServerWrite somewhere idx 1 offset "Hello world"
+            -- But this one is just a duplicate of share 0 on the other
+            -- server.
+            elsewhere <- memoryStorageServer
+            storageServerWrite elsewhere idx 0 offset "Hello world"
+
+            -- Make the servers reachable.
+            let openServer furl =
+                    case furl of
+                        "somewhere" -> pure somewhere
+                        "elsewhere" -> pure elsewhere
+                        _ -> Nothing
+
+            -- Try to download the cap which requires three shares to reconstruct.
+            result <- liftIO $ download anns cap openServer
+            assertEqual
+                "download should fail with not enough shares"
+                (Left NotEnoughShares{notEnoughSharesNeeded = 3, notEnoughSharesFound = 2})
+                result
+        , testProperty "success" $
+            property $ do
+                -- If we can recover enough distinct, decodeable shares from the
+                -- configured servers then we can recover the application data.
+
+                -- Generate configurations where it should be possible to
+                -- recover the data (have all the shares, have enough of the
+                -- shares, spread them across many servers, concentrate them
+                -- on one or a few, etc.)
+
+                secret <- forAll $ Gen.bytes (Range.singleton 32)
+                plaintext <- forAll $ BL.fromStrict <$> Gen.bytes (Range.exponential 56 1024)
+                params@Parameters{paramTotalShares} <- forAll genParameters
+
+                -- Since multiple shares can be placed on a single server, as
+                -- long as we have one server we have a valid case.  Since some
+                -- shares might be placed non-optimally it is also nice to
+                -- allow for some empty servers as well.
+                let numServers = Range.linear 1 (fromIntegral paramTotalShares + 1)
+                    serverIDs = Gen.text (Range.singleton 2) Gen.ascii
+                serverIDs' <- forAll $ Gen.set numServers serverIDs
+
+                -- Choose a share distribution.  Each element of the resulting
+                -- list tells us how many shares to place on the next server, for
+                -- some arbitrary (stable) server ordering.
+                perServerShareCount <-
+                    forAll $
+                        genListWithSum (length serverIDs') (fromIntegral paramTotalShares)
+
+                -- Make the servers.
+                servers <- liftIO $ replicateM (length serverIDs') memoryStorageServer
+
+                -- Encrypt and encode the data into shares.
+                let key = getConvergentKey secret params plaintext
+                    ciphertext = Tahoe.CHK.Encrypt.encrypt key plaintext
+                (shares, cap) <- liftIO $ Tahoe.CHK.encode key params ciphertext
+
+                -- Distribute the shares.
+                liftIO $ placeShares cap (Binary.encode <$> shares) perServerShareCount servers 0
+
+                let serverMap = Map.fromList $ zip (Set.toList serverIDs') servers
+                    lookupServer = someServers serverMap
+                    serverAnnouncements = Map.fromSet makeAnn serverIDs'
+
+                -- Recover the plaintext from the servers.
+                result <- liftIO $ download serverAnnouncements cap lookupServer
+                diff (Right plaintext) (==) result
+        ]
+  where
+    -- A server lookup function that always fails.
+    noServers _ = Nothing
+
+    -- A server lookup function that finds servers already present in a Map.
+    someServers :: Map.Map StorageServerID StorageServer -> LookupServer
+    someServers servers = flip Map.lookup servers . parseURL
+      where
+        -- Exactly match the nonsense makeAnn spits out
+        parseURL = T.take 2 . T.drop 5
+
+    --- PHILOSOPHY
+    -- We wish that share numbers were an opaque type instead of a
+    -- numeric/integral type.  This is not the place to argue the point
+    -- though.
+    placeShares :: Reader -> [BL.ByteString] -> [Int] -> [StorageServer] -> Int -> IO ()
+    -- Out of shares, done.
+    placeShares _ [] _ _ _ = pure ()
+    -- Out of placement info but not out of shares is a programming error.
+    placeShares _ _ [] _ _ = throwIO RanOutOfPlacementInfo
+    -- Out of servers but not out of shares is a programming error.
+    placeShares _ _ _ [] _ = throwIO RanOutOfServers
+    -- Having some of all three means we can make progress.
+    placeShares cap shares (n : ns) (s : ss) sharesSoFar = do
+        -- write the right number of shares to this server
+        zipWithM_
+            (\shnum share -> storageServerWrite s (storageIndex . verifier $ cap) shnum 0 share)
+            [fromIntegral sharesSoFar ..]
+            (BL.toStrict <$> take n shares)
+        -- recurse to write the rest
+        placeShares cap (drop n shares) ns ss (sharesSoFar + n)
+
+    -- Make up a distinct (but nonsense) announcement for a given storage
+    -- server identifier.
+    makeAnn :: StorageServerID -> StorageServerAnnouncement
+    makeAnn sid =
+        StorageServerAnnouncement
+            { storageServerAnnouncementFURL = Just $ "pb://" <> sid <> "/" <> sid
+            , storageServerAnnouncementNick = Just . encodeBase32Unpadded . encodeUtf8 $ sid
+            , storageServerAnnouncementPermutationSeed = Nothing
+            }
+
+    -- Generate lists of ints that sum to a given total.
+    genListWithSum :: MonadGen m => Int -> Int -> m [Int]
+    -- We hit the target.
+    genListWithSum _ 0 = pure []
+    -- We only have room for one more element.
+    genListWithSum 1 t = pure [t]
+    -- Use up some of what's left on one element and recurse.
+    genListWithSum maxLength t = do
+        v <- Gen.int (Range.linear 0 t)
+        (v :) <$> genListWithSum (maxLength - 1) (t - v)
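+
+    -- For example, genListWithSum 3 5 might produce [2, 0, 3] or [5]: at
+    -- most three elements whose sum is exactly 5.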
+
+trivialCap :: Word16 -> Word16 -> Reader
+trivialCap required total = Reader{..}
+  where
+    Just readKey = buildKey $ B.replicate 32 0x00
+    storageIndex = B.replicate 32 0x00
+    fingerprint = B.replicate 32 0x00
+    size = 1234
+    verifier = Verifier{..}
+
+main :: IO ()
+main = do
+    -- Hedgehog writes some non-ASCII and the whole test process will die if
+    -- it can't be encoded.  Increase the chances that all of the output can
+    -- be encoded by forcing the use of UTF-8 (overriding the LANG-based
+    -- choice normally made).
+    hSetEncoding stdout utf8
+    hSetEncoding stderr utf8
+
+    defaultMain tests