diff --git a/README.md b/README.md
index c34c52b86a12b2c23ee4ecc7bfbc3d7659a67339..b125ead62c5b1422eea5065034692bb6f03377eb 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@ It aims for bit-for-bit compatibility with the original Python implementation.
 
 ### What is the current state?
 
-It's just starting.
-It can't do much.
+* It can download immutable and mutable shares from Great Black Swamp storage servers.
+* It can interpret, decode, and decrypt the data for CHK- and SDMF-encoded shares to recover the plaintext.
 
 ## Why does it exist?
 
diff --git a/cabal.project b/cabal.project
index 55be17a13a683db9dd1f7743b243c5f56a0f587a..64918bf7b7263adb122812774298420da4153234 100644
--- a/cabal.project
+++ b/cabal.project
@@ -2,8 +2,9 @@ packages: .
           -- These aren't released on hackage yet so we have to grab them
           -- straight from the vcs host.  Probably should do some releases
           -- soon.
+          https://whetstone.private.storage/privatestorage/tahoe-ssk/-/archive/0.2.0.0/tahoe-ssk-0.2.0.0.tar.gz
           https://whetstone.private.storage/privatestorage/tahoe-chk/-/archive/0.1.0.1/tahoe-chk-0.1.0.1.tar.gz
-          https://whetstone.private.storage/privatestorage/tahoe-great-black-swamp/-/archive/0.2.0.2/tahoe-great-black-swamp-0.2.0.2.tar.gz
+          https://whetstone.private.storage/privatestorage/tahoe-great-black-swamp/-/archive/0.3.0.0/tahoe-great-black-swamp-0.3.0.0.tar.gz
 
 package zlib
         -- Turn on discovery of the underlying zlib using pkg-config.  This
diff --git a/download-sdmf/Main.hs b/download-sdmf/Main.hs
new file mode 100644
index 0000000000000000000000000000000000000000..4e981e104d4c59e4c7e37b885b1851ab1a5d2a43
--- /dev/null
+++ b/download-sdmf/Main.hs
@@ -0,0 +1,34 @@
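+-- | Download the data referenced by an SDMF capability: read storage server
+-- announcements from a YAML file, fetch and decode the shares, and print the
+-- recovered plaintext.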
+module Main where
+
+import qualified Data.ByteString as B
+import qualified Data.ByteString.Char8 as C8
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as T
+import Data.Yaml (decodeEither')
+import System.Environment (getArgs)
+import Tahoe.Announcement (Announcements (..))
+import Tahoe.Download (announcementToMutableStorageServer, download)
+import Tahoe.SDMF (SDMF (..), pCapability, writerReader)
+import Text.Megaparsec (parse)
+
+main :: IO ()
+main = do
+    [announcementPath, readCap] <- getArgs
+    -- Load server announcements
+    announcementsBytes <- B.readFile announcementPath
+    let Right (Announcements announcements) = decodeEither' announcementsBytes
+
+    -- Accept & parse read capability
+    case parse pCapability "<argv>" (T.pack readCap) of
+        Left e -> putStrLn $ "Failed to parse cap: " <> show e
+        Right (SDMFVerifier _) -> C8.putStrLn "Nothing currently implemented for verifier caps."
+        Right (SDMFWriter rwcap) -> go announcements (writerReader rwcap)
+        Right (SDMFReader rocap) -> go announcements rocap
+  where
+    go announcements cap = do
+        -- Download & decode the shares
+        result <- download announcements cap announcementToMutableStorageServer
+
+        -- Show the result
+        putStrLn "Your result:"
+        either print (C8.putStrLn . BL.toStrict) result
diff --git a/flake.lock b/flake.lock
index d5833947200194bc4a0e1131fef92513461b13cf..605b213984989df112e71a403a58bb0b04998a4b 100644
--- a/flake.lock
+++ b/flake.lock
@@ -48,6 +48,22 @@
         "type": "github"
       }
     },
+    "flake-compat_4": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1673956053,
+        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
     "flake-utils": {
       "locked": {
         "lastModified": 1667395993,
@@ -138,6 +154,39 @@
         "type": "github"
       }
     },
+    "flake-utils_7": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1681202837,
+        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "flake-utils_8": {
+      "locked": {
+        "lastModified": 1676283394,
+        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
     "gitignore": {
       "inputs": {
         "nixpkgs": [
@@ -206,6 +255,29 @@
         "type": "github"
       }
     },
+    "gitignore_4": {
+      "inputs": {
+        "nixpkgs": [
+          "tahoe-ssk",
+          "hs-flake-utils",
+          "pre-commit-hooks",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1660459072,
+        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "type": "github"
+      }
+    },
     "hs-flake-utils": {
       "inputs": {
         "flake-utils": "flake-utils_2",
@@ -269,6 +341,27 @@
         "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
       }
     },
+    "hs-flake-utils_4": {
+      "inputs": {
+        "flake-utils": "flake-utils_8",
+        "nixpkgs": "nixpkgs_4",
+        "pre-commit-hooks": "pre-commit-hooks_4"
+      },
+      "locked": {
+        "lastModified": 1681762240,
+        "narHash": "sha256-+PLx9xHBvV70dA7Gy/+YTH1w3PcSOrGV0z0rGxts8jU=",
+        "ref": "main",
+        "rev": "a51e591b7fdf8881ac0237452691df7b1aceecd3",
+        "revCount": 10,
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      },
+      "original": {
+        "ref": "main",
+        "type": "git",
+        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
+      }
+    },
     "nixpkgs": {
       "locked": {
         "lastModified": 1677624842,
@@ -333,6 +426,22 @@
         "type": "github"
       }
     },
+    "nixpkgs-stable_4": {
+      "locked": {
+        "lastModified": 1673800717,
+        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "nixpkgs_2": {
       "locked": {
         "lastModified": 1677624842,
@@ -365,6 +474,22 @@
         "type": "github"
       }
     },
+    "nixpkgs_4": {
+      "locked": {
+        "lastModified": 1677624842,
+        "narHash": "sha256-4DF9DbDuK4/+KYx0L6XcPBeDHUFVCtzok2fWtwXtb5w=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "d70f5cd5c3bef45f7f52698f39e7cc7a89daa7f0",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-22.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "pre-commit-hooks": {
       "inputs": {
         "flake-compat": "flake-compat",
@@ -453,6 +578,36 @@
         "type": "github"
       }
     },
+    "pre-commit-hooks_4": {
+      "inputs": {
+        "flake-compat": "flake-compat_4",
+        "flake-utils": [
+          "tahoe-ssk",
+          "hs-flake-utils",
+          "flake-utils"
+        ],
+        "gitignore": "gitignore_4",
+        "nixpkgs": [
+          "tahoe-ssk",
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "nixpkgs-stable": "nixpkgs-stable_4"
+      },
+      "locked": {
+        "lastModified": 1677722096,
+        "narHash": "sha256-7mjVMvCs9InnrRybBfr5ohqcOz+pyEX8m22C1XsDilg=",
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "rev": "61a3511668891c68ebd19d40122150b98dc2fe3b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "type": "github"
+      }
+    },
     "root": {
       "inputs": {
         "flake-utils": "flake-utils",
@@ -462,7 +617,23 @@
           "nixpkgs"
         ],
         "tahoe-chk": "tahoe-chk",
-        "tahoe-great-black-swamp": "tahoe-great-black-swamp"
+        "tahoe-great-black-swamp": "tahoe-great-black-swamp",
+        "tahoe-ssk": "tahoe-ssk"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
       }
     },
     "tahoe-chk": {
@@ -502,19 +673,46 @@
         ]
       },
       "locked": {
-        "lastModified": 1683553313,
-        "narHash": "sha256-tXZc8ZDNkHtegoM1HlDUf1Jr5IE04aobZDpnBaBm53w=",
-        "ref": "refs/tags/0.2.0.2",
-        "rev": "ab799ee24d7150e13b300b86240433ecdb783577",
-        "revCount": 187,
+        "lastModified": 1685640961,
+        "narHash": "sha256-zH9bRWqm1dYcgg9/VSVJ7y8qZIM8fwp5H0rRsaWT8lg=",
+        "ref": "refs/tags/0.3.0.0",
+        "rev": "3d00d90d1e64897aa92f33a012343a5ca46fe7fc",
+        "revCount": 194,
         "type": "git",
         "url": "https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp"
       },
       "original": {
-        "ref": "refs/tags/0.2.0.2",
+        "ref": "refs/tags/0.3.0.0",
         "type": "git",
         "url": "https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp"
       }
+    },
+    "tahoe-ssk": {
+      "inputs": {
+        "flake-utils": "flake-utils_7",
+        "hs-flake-utils": "hs-flake-utils_4",
+        "nixpkgs": [
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "tahoe-chk": [
+          "tahoe-chk"
+        ]
+      },
+      "locked": {
+        "lastModified": 1685641443,
+        "narHash": "sha256-lgHcvFz6s07HXdGCz1C/dOSVLi0HiAtHiv0Na4QkQEg=",
+        "ref": "refs/tags/0.2.0.0",
+        "rev": "1afe634278c96d61b8acb994ec82a71a6394d032",
+        "revCount": 84,
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-ssk"
+      },
+      "original": {
+        "ref": "refs/tags/0.2.0.0",
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-ssk"
+      }
     }
   },
   "root": "root",
diff --git a/flake.nix b/flake.nix
index 01d65c63769c028fcc7dbeaf82221e4ed9712a87..d04dcad05bc4ae044d5beb0ccc2029fbc2b687ce 100644
--- a/flake.nix
+++ b/flake.nix
@@ -12,8 +12,14 @@
       inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
     };
 
+    tahoe-ssk = {
+      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-ssk?ref=refs/tags/0.2.0.0";
+      inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+      inputs.tahoe-chk.follows = "tahoe-chk";
+    };
+
     tahoe-great-black-swamp = {
-      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp?ref=refs/tags/0.2.0.2";
+      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-great-black-swamp?ref=refs/tags/0.3.0.0";
       inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
       inputs.tahoe-chk.follows = "tahoe-chk";
     };
@@ -25,6 +31,7 @@
     flake-utils,
     hs-flake-utils,
     tahoe-chk,
+    tahoe-ssk,
     tahoe-great-black-swamp,
   }: let
     ulib = flake-utils.lib;
@@ -42,6 +49,7 @@
         packageName = "gbs-downloader";
         hsPkgsOverrides = import ./nix/haskell-packages.nix {
           tahoe-chk = tahoe-chk.outputs.packages.${system}.default;
+          tahoe-ssk = tahoe-ssk.outputs.packages.${system}.default;
           tahoe-great-black-swamp = tahoe-great-black-swamp.outputs.packages.${system}.default;
           haskellLib = pkgs.haskell.lib;
         };
diff --git a/gbs-downloader.cabal b/gbs-downloader.cabal
index ba2597aa1d6bdb02aadeb79eb14fd2a3f267a66b..5b79beb1da8cc4ba249423f856ab5a382f9efb8d 100644
--- a/gbs-downloader.cabal
+++ b/gbs-downloader.cabal
@@ -81,8 +81,10 @@ library
   exposed-modules:
     Tahoe.Announcement
     Tahoe.Download
+    Tahoe.Download.Internal.Capability
     Tahoe.Download.Internal.Client
     Tahoe.Download.Internal.Immutable
+    Tahoe.Download.Internal.Mutable
 
   -- Modules included in this library but not exported.
   -- other-modules:
@@ -104,11 +106,13 @@ library
     , exceptions
     , http-client
     , http-client-tls
+    , http-types
     , network-uri
     , servant-client
     , servant-client-core
     , tahoe-chk
-    , tahoe-great-black-swamp  >=0.2 && <0.3
+    , tahoe-great-black-swamp  >=0.3 && <0.4
+    , tahoe-ssk                >=0.2 && <0.3
     , text
     , yaml
 
@@ -118,7 +122,7 @@ library
   -- Base language which the package is written in.
   default-language: Haskell2010
 
-executable gbs-download
+executable gbs-download-chk
   -- Import common warning flags.
   import:
     warnings
@@ -151,6 +155,26 @@ executable gbs-download
   -- Base language which the package is written in.
   default-language: Haskell2010
 
+executable gbs-download-sdmf
+  import:
+    warnings
+    , language
+
+  main-is:          Main.hs
+  build-depends:
+    , aeson
+    , base
+    , bytestring
+    , containers
+    , gbs-downloader
+    , megaparsec
+    , tahoe-ssk       >=0.2 && <0.3
+    , text
+    , yaml
+
+  hs-source-dirs:   download-sdmf
+  default-language: Haskell2010
+
 test-suite gbs-downloader-test
   -- Import common warning flags.
   import:
@@ -174,18 +198,23 @@ test-suite gbs-downloader-test
 
   -- Test dependencies.
   build-depends:
+    , asn1-encoding
+    , asn1-types
     , base
     , base32
     , binary
     , bytestring
     , containers
     , crypto-api
+    , cryptonite
     , data-default-class
     , gbs-downloader
     , hedgehog
     , tahoe-chk
+    , tahoe-ssk           >=0.2 && <0.3
     , tasty
     , tasty-hedgehog
     , tasty-hunit
     , text
+    , x509
     , yaml
diff --git a/nix/haskell-packages.nix b/nix/haskell-packages.nix
index 175fc2091a3906e508c6b4d7e1146a01122494c1..dbf474bdcd3a3ddcda31a2255387b72a9fe477a4 100644
--- a/nix/haskell-packages.nix
+++ b/nix/haskell-packages.nix
@@ -1,9 +1,11 @@
 {
   haskellLib,
   tahoe-chk,
+  tahoe-ssk,
   tahoe-great-black-swamp,
 }: hfinal: hprev: {
   inherit tahoe-chk;
+  inherit tahoe-ssk;
   inherit tahoe-great-black-swamp;
 
   # A broken dependency of a tahoe-great-black-swamp executable that we don't
diff --git a/src/Tahoe/Download.hs b/src/Tahoe/Download.hs
index 31697c3a6cfc59098a9c6a701936f80ada29f1b0..93488ceb27f6bd5e10a78d7ef5ff9a852ae2ef26 100644
--- a/src/Tahoe/Download.hs
+++ b/src/Tahoe/Download.hs
@@ -1,5 +1,4 @@
 {-# LANGUAGE FlexibleContexts #-}
-{-# LANGUAGE FunctionalDependencies #-}
 
 {- | A high-level interface to downloading share data as bytes from storage
  servers.
@@ -12,6 +11,7 @@ module Tahoe.Download (
     discoverShares,
     download,
     announcementToImmutableStorageServer,
+    announcementToMutableStorageServer,
     getShareNumbers,
 ) where
 
@@ -26,19 +26,12 @@ import Data.List (foldl')
 import qualified Data.Map.Strict as Map
 import qualified Data.Set as Set
 import Tahoe.Announcement (StorageServerAnnouncement)
-import qualified Tahoe.CHK
-import Tahoe.CHK.Capability (Reader (..), Verifier (..))
-import qualified Tahoe.CHK.Capability as CHK
-import qualified Tahoe.CHK.Encrypt
 import Tahoe.CHK.Server (StorageServer (..), StorageServerID)
-import qualified Tahoe.CHK.Share
 import Tahoe.CHK.Types (ShareNum, StorageIndex)
+import Tahoe.Download.Internal.Capability
 import Tahoe.Download.Internal.Client
 import Tahoe.Download.Internal.Immutable
-
-print' :: MonadIO m => String -> m ()
--- print' = liftIO . print
-print' = const $ pure ()
+import Tahoe.Download.Internal.Mutable
 
 -- | Partially describe one share download.
 type DownloadTask = (ShareNum, StorageServer)
@@ -50,12 +43,12 @@ type Share = (ShareNum, LB.ByteString)
  given servers, if possible.
 -}
 download ::
-    (MonadIO m, Verifiable v, Readable r v) =>
+    (MonadIO m, Verifiable verifyCap, Readable readCap verifyCap) =>
     -- | Information about the servers from which to consider downloading shares
     -- representing the application data.
     Map.Map StorageServerID StorageServerAnnouncement ->
     -- | The read capability for the application data.
-    r ->
+    readCap ->
     -- | Get functions for interacting with a server given its URL.
     LookupServer m ->
     -- | Either a description of how the recovery failed or the recovered
@@ -68,82 +61,43 @@ download servers cap lookupServer = do
     -- TODO: If getRequiredTotal fails on the first storage server, we may
     -- need to try more.  If it fails for all of them, we need to represent
     -- the failure coherently.
-    ss <- firstStorageServer (Map.elems servers) lookupServer
-    (required, _) <- getRequiredTotal verifier ss
-    locationE <- locateShares servers lookupServer storageIndex (fromIntegral required)
-    print' "Finished locating shares"
-    case locationE of
-        Left err -> do
-            print' "Got an error locating shares"
-            pure $ Left err
-        Right discovered -> do
-            print' "Found some shares, fetching them"
-            -- XXX note shares can contain failures
-            shares <- executeDownloadTasks storageIndex (makeDownloadTasks =<< discovered)
-            print' "Fetched the shares, decoding them"
-            s <- decodeShares cap shares required
-            print' "Decoded them"
-            pure s
-
--- We also need "first successful share"!
-firstStorageServer :: Monad m => [StorageServerAnnouncement] -> LookupServer m -> m StorageServer
-firstStorageServer servers finder = do
-    responses <- mapM finder servers
-    pure $ head $ take 1 $ rights responses -- XXX don't do this at home kids, head isn't safe
-
--- | A capability which confers the ability to locate and verify some stored data.
-class Verifiable v where
-    -- | Ask a storage server which share numbers related to this capability it
-    -- is holding.  This is an unverified result and the storage server could
-    -- present incorrect information.  Even if it correctly reports that it
-    -- holds a share, it could decline to give it out when asked.
-    getShareNumbers :: MonadIO m => v -> StorageServer -> m (Set.Set ShareNum)
-
-    -- | Get the encoding parameters used for the shares of this capability.
-    -- The information is presented as a tuple of (required, total).
-    getRequiredTotal :: MonadIO m => v -> StorageServer -> m (Int, Int)
-
-    -- | Get the location information for shares of this capability.
-    getStorageIndex :: v -> StorageIndex
-
-instance Verifiable CHK.Verifier where
-    getShareNumbers v s = liftIO $ storageServerGetBuckets s (storageIndex v)
-    getStorageIndex Verifier{storageIndex} = storageIndex
-
-    -- CHK is pure, we don't have to ask the StorageServer
-    getRequiredTotal Verifier{required, total} _ = pure (fromIntegral required, fromIntegral total)
-
-{- | A capability which confers the ability to interpret some stored data to
- recover the original plaintext.  Additionally, it can be attentuated to a
- Verifiable.
--}
-class (Verifiable v) => Readable r v | r -> v where
-    -- | Attentuate the capability.
-    getVerifiable :: r -> v
-
-    -- | Interpret the required number of shares to recover the plaintext.
-    --
-    -- Note: might want to split the two functions below out of decodeShare
-    --
-    -- shareToCipherText :: r -> [(Int, WhichShare)] -> LB.ByteString
-    --
-    -- cipherTextToPlainText :: r -> LB.ByteString -> LB.ByteString
-    decodeShare :: MonadIO m => r -> [(Int, WhichShare)] -> m (Either DownloadError LB.ByteString)
-
-instance Readable CHK.Reader CHK.Verifier where
-    getVerifiable = verifier
-    decodeShare r shareList = do
-        cipherText <- liftIO $ Tahoe.CHK.decode r (second unWhich <$> shareList)
-        case cipherText of
-            Nothing -> pure $ Left ShareDecodingFailed
-            Just ct ->
-                pure . Right $ Tahoe.CHK.Encrypt.decrypt (readKey r) ct
-
-{- | Represent the kind of share to operate on.  This forms a closed world of
- share types.  It might eventually be interesting to make an open world
- variation instead.
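+    -- Ask the announced servers one at a time until one of them can tell us
+    -- the encoding parameters (required and total shares) for this
+    -- capability.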
+    someParam <- firstRightM lookupServer (getRequiredTotal verifier) (Map.elems servers)
+    case someParam of
+        Left errs -> pure . Left $ if servers == mempty then NoConfiguredServers else NoReachableServers (StorageServerUnreachable <$> errs)
+        Right (required, _) -> do
+            locationE <- locateShares servers lookupServer storageIndex (fromIntegral required)
+            print' "Finished locating shares"
+            case locationE of
+                Left err -> do
+                    print' "Got an error locating shares"
+                    pure $ Left err
+                Right discovered -> do
+                    print' "Found some shares, fetching them"
+                    -- XXX note shares can contain failures
+                    shares <- executeDownloadTasks storageIndex (makeDownloadTasks =<< discovered)
+                    print' "Fetched the shares, decoding them"
+                    s <- decodeShares cap shares required
+                    print' "Decoded them"
+                    pure s
+
+{- | Apply a monadic operation to each element of a list and a second monadic
+ operation to the value in each resulting Right.  If all of the results are
+ Lefts or Nothings, return a list of the values from the Lefts.  Otherwise,
+ return the *first* Right.
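+
+ For example, a hypothetical GHCi session (not part of the test suite) might
+ look like:
+
+ >>> firstRightM (\x -> pure (Right x :: Either String Int)) (pure . Just . show) [1, 2, 3]
+ Right "1"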
 -}
-newtype WhichShare = CHK {unWhich :: Tahoe.CHK.Share.Share} -- \| SDMF SDMF.Share
+firstRightM :: MonadIO m => (a -> m (Either b c)) -> (c -> m (Maybe d)) -> [a] -> m (Either [b] d)
+firstRightM _ _ [] = pure $ Left []
+firstRightM f op (x : xs) = do
+    s <- f x
+    case s of
+        Left bs -> first (bs :) <$> recurse
+        Right ss -> do
+            r <- op ss
+            case r of
+                Nothing -> recurse
+                Just d -> pure $ Right d
+  where
+    recurse = firstRightM f op xs
 
 {- | Deserialize some bytes representing some kind of share to that kind of
  share, if possible.
@@ -151,8 +105,10 @@ newtype WhichShare = CHK {unWhich :: Tahoe.CHK.Share.Share} -- \| SDMF SDMF.Shar
 bytesToShare :: LB.ByteString -> Either DeserializeError WhichShare
 bytesToShare bytes = do
     case decodeOrFail bytes of
-        Left _ -> Left UnknownDeserializeError
         Right (_, _, r) -> Right $ CHK r
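+        -- If the bytes do not deserialize as a CHK share, fall back to trying
+        -- SDMF before giving up.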
+        Left _ -> case decodeOrFail bytes of
+            Right (_, _, r) -> Right $ SDMF r
+            Left _ -> Left UnknownDeserializeError
 
 {- | Execute each download task sequentially and return only the successful
  results.
@@ -207,9 +163,9 @@ locateShares servers lookupServer storageIndex required =
  decode them and decrypt the contents of possible.
 -}
 decodeShares ::
-    (MonadIO m, Readable r v) =>
+    (MonadIO m, Readable readCap verifyCap) =>
     -- | The read capability which allows the contents to be decrypted.
-    r ->
+    readCap ->
     -- | The results of downloading the shares.
     [Share] ->
     Int ->
diff --git a/src/Tahoe/Download/Internal/Capability.hs b/src/Tahoe/Download/Internal/Capability.hs
new file mode 100644
index 0000000000000000000000000000000000000000..435138756447097e5e0cf29f246f42618981b0ae
--- /dev/null
+++ b/src/Tahoe/Download/Internal/Capability.hs
@@ -0,0 +1,101 @@
+{-# LANGUAGE FunctionalDependencies #-}
+
+module Tahoe.Download.Internal.Capability where
+
+import Control.Monad.IO.Class
+import Data.Bifunctor (Bifunctor (..))
+import Data.Binary (decodeOrFail)
+import qualified Data.ByteString.Lazy as LB
+import qualified Data.Set as Set
+import qualified Tahoe.CHK
+import qualified Tahoe.CHK.Capability as CHK
+import qualified Tahoe.CHK.Encrypt
+import Tahoe.CHK.Server
+import qualified Tahoe.CHK.Share
+import Tahoe.CHK.Types
+import Tahoe.Download.Internal.Client
+import qualified Tahoe.SDMF as SDMF
+import qualified Tahoe.SDMF.Internal.Keys as SDMF.Keys
+
+{- | Represent the kind of share to operate on.  This forms a closed world of
+ share types.  It might eventually be interesting to make an open world
+ variation instead.
+-}
+data WhichShare
+    = CHK {unWhichCHK :: Tahoe.CHK.Share.Share}
+    | SDMF {unWhichSDMF :: SDMF.Share}
+
+-- | A capability which confers the ability to locate and verify some stored data.
+class Verifiable v where
+    -- | Ask a storage server which share numbers related to this capability it
+    -- is holding.  This is an unverified result and the storage server could
+    -- present incorrect information.  Even if it correctly reports that it
+    -- holds a share, it could decline to give it out when asked.
+    getShareNumbers :: MonadIO m => v -> StorageServer -> m (Set.Set ShareNum)
+
+    -- | Get the encoding parameters used for the shares of this capability.
+    -- The information is presented as a tuple of (required, total).
+    getRequiredTotal :: MonadIO m => v -> StorageServer -> m (Maybe (Int, Int))
+
+    -- | Get the location information for shares of this capability.
+    getStorageIndex :: v -> StorageIndex
+
+{- | A capability which confers the ability to interpret some stored data to
+ recover the original plaintext.  Additionally, it can be attenuated to a
+ Verifiable.
+-}
+class (Verifiable v) => Readable r v | r -> v where
+    -- | Attenuate the capability.
+    getVerifiable :: r -> v
+
+    -- | Interpret the required number of shares to recover the plaintext.
+    --
+    -- Note: might want to split the two functions below out of decodeShare
+    --
+    -- shareToCipherText :: r -> [(Int, WhichShare)] -> LB.ByteString
+    --
+    -- cipherTextToPlainText :: r -> LB.ByteString -> LB.ByteString
+    decodeShare :: MonadIO m => r -> [(Int, WhichShare)] -> m (Either DownloadError LB.ByteString)
+
+instance Verifiable CHK.Verifier where
+    getShareNumbers v s = liftIO $ storageServerGetBuckets s (CHK.storageIndex v)
+    getStorageIndex CHK.Verifier{storageIndex} = storageIndex
+
+    -- CHK is pure, we don't have to ask the StorageServer
+    getRequiredTotal CHK.Verifier{required, total} _ = pure $ pure (fromIntegral required, fromIntegral total)
+
+instance Readable CHK.Reader CHK.Verifier where
+    getVerifiable = CHK.verifier
+    decodeShare r shareList = do
+        cipherText <- liftIO $ Tahoe.CHK.decode r (second unWhichCHK <$> shareList)
+        case cipherText of
+            Nothing -> pure $ Left ShareDecodingFailed
+            Just ct ->
+                pure . Right $ Tahoe.CHK.Encrypt.decrypt (CHK.readKey r) ct
+
+instance Verifiable SDMF.Verifier where
+    getShareNumbers v s = liftIO $ storageServerGetBuckets s (SDMF.Keys.unStorageIndex $ SDMF.verifierStorageIndex v)
+    getStorageIndex = SDMF.Keys.unStorageIndex . SDMF.verifierStorageIndex
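+    -- Unlike CHK, an SDMF capability does not carry the encoding parameters,
+    -- so read a share from the server and take them from the share itself.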
+    getRequiredTotal SDMF.Verifier{..} ss = do
+        shareBytes <- liftIO $ storageServerRead ss (SDMF.Keys.unStorageIndex verifierStorageIndex) 0
+        case decodeOrFail (LB.fromStrict shareBytes) of
+            Left _ -> pure Nothing
+            Right (_, _, sh) -> pure $ pure (fromIntegral $ SDMF.shareRequiredShares sh, fromIntegral $ SDMF.shareTotalShares sh)
+
+instance Readable SDMF.Reader SDMF.Verifier where
+    getVerifiable = SDMF.readerVerifier
+    decodeShare r shareList = do
+        cipherText <- Right <$> liftIO (SDMF.decode r (bimap fromIntegral unWhichSDMF <$> shareList))
+        case cipherText of
+            Left _ -> pure $ Left ShareDecodingFailed
+            Right ct -> do
+                print' ("Got some ciphertext: " <> show ct)
+                print' ("Decrypting with iv: " <> show iv)
+                pure . Right $ SDMF.decrypt readKey iv ct
+              where
+                readKey = SDMF.readerReadKey r
+                iv = SDMF.shareIV (unWhichSDMF . snd . head $ shareList)
+
+print' :: MonadIO m => String -> m ()
+-- print' = liftIO . putStrLn
+print' = const $ pure ()
diff --git a/src/Tahoe/Download/Internal/Client.hs b/src/Tahoe/Download/Internal/Client.hs
index 4b811312ac25b443a0f2ff92863c2ea0b59c1a77..43cc02e6b609319c18f36804146f77d35e005e52 100644
--- a/src/Tahoe/Download/Internal/Client.hs
+++ b/src/Tahoe/Download/Internal/Client.hs
@@ -3,19 +3,25 @@
 -}
 module Tahoe.Download.Internal.Client where
 
+import Control.Exception
 import Control.Monad.IO.Class
 import qualified Data.ByteString as B
+import Data.ByteString.Base32
 import qualified Data.ByteString.Base64 as Base64
+import qualified Data.Set as Set
 import qualified Data.Text as T
 import Data.Text.Encoding
 import Network.Connection
-import Network.HTTP.Client
+import Network.HTTP.Client (Manager, ManagerSettings (managerModifyRequest), Request (requestHeaders))
 import Network.HTTP.Client.TLS
+import Network.HTTP.Types (ByteRange)
 import Servant.Client
 import Tahoe.Announcement
 import Tahoe.CHK.Server (
-    StorageServer,
+    StorageServer (..),
  )
+import TahoeLAFS.Storage.API (CBORSet (CBORSet), ShareNumber (ShareNumber))
+import Text.Read (readMaybe)
 
 -- | Make an HTTPS URL.
 https :: String -> Int -> BaseUrl
@@ -125,3 +131,68 @@ data LookupError
  representation of some value.
 -}
 data DeserializeError = UnknownDeserializeError -- add more later?
+
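+-- The operations a Great Black Swamp client needs in order to build a
+-- StorageServer: one to list the share numbers held for a (base32-encoded)
+-- storage index and one to read bytes from a particular share.  Both the
+-- immutable and the mutable APIs provide operations of these shapes.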
+type GetShareNumbers = String -> ClientM (CBORSet ShareNumber)
+type ReadShare = String -> ShareNumber -> Maybe [ByteRange] -> ClientM B.ByteString
+
+{- | Create a StorageServer that will speak Great Black Swamp using the given
+ manager to the server at the given host/port.
+-}
+mkWrapper :: GetShareNumbers -> ReadShare -> Manager -> [Char] -> Int -> StorageServer
+mkWrapper getShareNumbers readShare manager host realPort =
+    StorageServer{..}
+  where
+    baseUrl = https host realPort
+    env = mkClientEnv manager baseUrl
+    toBase32 = T.unpack . T.toLower . encodeBase32Unpadded
+
+    storageServerID = undefined
+
+    storageServerWrite = undefined
+
+    storageServerRead storageIndex shareNum = do
+        let clientm = readShare (toBase32 storageIndex) (ShareNumber $ fromIntegral shareNum) Nothing
+        res <- runClientM clientm env
+        case res of
+            Left err -> do
+                throwIO err
+            Right bs -> pure bs
+
+    storageServerGetBuckets storageIndex = do
+        let clientm = getShareNumbers (toBase32 storageIndex)
+        r <- try $ runClientM clientm env
+        case r of
+            Left (_ :: SomeException) -> do
+                pure mempty
+            Right res -> do
+                case res of
+                    Left err -> do
+                        throwIO err
+                    Right (CBORSet s) -> pure $ Set.map (\(ShareNumber i) -> fromIntegral i) s -- XXX fromIntegral aaaaaaaa!!
+
+{- | If possible, populate a StorageServer with functions for operating on data
+  on the server at the given URI.
+-}
+makeServer :: MonadIO m => GetShareNumbers -> ReadShare -> URI -> m (Either LookupError StorageServer)
+makeServer
+    getShareNumbers
+    readShare
+    URI
+        { uriScheme = "pb:"
+        , uriAuthority = Just URIAuth{uriUserInfo = tubid, uriRegName = host, uriPort = (':' : port)}
+        , uriPath = ('/' : swissnum)
+        , uriFragment = "" -- It's a fURL, not a NURL, so there's no fragment.
+        } =
+        case readMaybe port of
+            Nothing -> pure . Left . PortParseError $ port
+            Just realPort -> do
+                manager <- liftIO $ newGBSManager tubid swissnum
+
+                pure . Right $ mkWrapper getShareNumbers readShare manager host realPort
+makeServer _ _ _ = pure . Left $ AnnouncementStructureUnmatched
+
+announcementToStorageServer :: MonadIO m => GetShareNumbers -> ReadShare -> StorageServerAnnouncement -> m (Either LookupError StorageServer)
+announcementToStorageServer getShareNumbers readShare ann =
+    case greatBlackSwampURIs ann of
+        Nothing -> pure . Left . URIParseError $ ann
+        Just uri -> makeServer getShareNumbers readShare uri
diff --git a/src/Tahoe/Download/Internal/Immutable.hs b/src/Tahoe/Download/Internal/Immutable.hs
index f7c9cb8f192c4c9351cddd7fd33989406a5e87bf..00b7efb9431f1f39a2ea70640b54d5196c2e994d 100644
--- a/src/Tahoe/Download/Internal/Immutable.hs
+++ b/src/Tahoe/Download/Internal/Immutable.hs
@@ -1,79 +1,15 @@
 -- | Functionality related to retrieving "immutable" shares (mainly CHK).
 module Tahoe.Download.Internal.Immutable where
 
-import Control.Exception
-import Control.Monad.IO.Class
-import Data.ByteString.Base32
-import qualified Data.Set as Set
-import qualified Data.Text as T
-import Network.HTTP.Client (Manager)
-import Servant.Client
-import Tahoe.Announcement
-import Tahoe.CHK.Server (StorageServer (..))
-import Tahoe.Download.Internal.Client
-import TahoeLAFS.Storage.API (CBORSet (CBORSet), ShareNumber (ShareNumber))
-import TahoeLAFS.Storage.Client
-import Text.Read (readMaybe)
-
-{- | Create a StorageServer that will speak Great Black Swamp using the given
- manager to the server at the given host/port.
--}
-mkImmutableWrapper :: Manager -> [Char] -> Int -> StorageServer
-mkImmutableWrapper manager host realPort =
-    StorageServer{..}
-  where
-    baseUrl = https host realPort
-    env = mkClientEnv manager baseUrl
-    toBase32 = T.unpack . T.toLower . encodeBase32Unpadded
-
-    storageServerID = undefined
-
-    storageServerWrite = undefined
-
-    storageServerRead storageIndex shareNum = do
-        let clientm = readImmutableShare (toBase32 storageIndex) (ShareNumber $ fromIntegral shareNum) Nothing
-        res <- runClientM clientm env
-        case res of
-            Left err -> do
-                throwIO err
-            Right bs -> pure bs
-
-    storageServerGetBuckets storageIndex = do
-        let clientm = getImmutableShareNumbers (toBase32 storageIndex)
-        r <- try $ runClientM clientm env
-        case r of
-            Left (_ :: SomeException) -> do
-                pure mempty
-            Right res -> do
-                case res of
-                    Left err -> do
-                        throwIO err
-                    Right (CBORSet s) -> pure $ Set.map (\(ShareNumber i) -> fromIntegral i) s -- XXX fromIntegral aaaaaaaa!!
-
-{- | Interpret the location in an announcement as a Tahoe-LAFS fURL pointed at
- a Great Black Swamp server.
+import Control.Monad.IO.Class (MonadIO)
+import Tahoe.Announcement (StorageServerAnnouncement)
+import Tahoe.CHK.Server (StorageServer)
+import Tahoe.Download.Internal.Client (LookupError, announcementToStorageServer)
+import TahoeLAFS.Storage.Client (getImmutableShareNumbers, readImmutableShare)
+
+{- | Interpret the location in an announcement as a Tahoe-LAFS fURL pointed at a
+ Great Black Swamp server and construct a StorageServer for interacting with
+ immutable shares stored on it.
 -}
 announcementToImmutableStorageServer :: MonadIO m => StorageServerAnnouncement -> m (Either LookupError StorageServer)
-announcementToImmutableStorageServer ann =
-    case greatBlackSwampURIs ann of
-        Nothing -> pure . Left . URIParseError $ ann
-        Just uri -> makeImmutableServer uri
-
-{- | If possible, populate a StorageServer with functions for operating on
- immutable data on the server at the given URI.
--}
-makeImmutableServer :: MonadIO m => URI -> m (Either LookupError StorageServer)
-makeImmutableServer
-    URI
-        { uriScheme = "pb:"
-        , uriAuthority = Just URIAuth{uriUserInfo = tubid, uriRegName = host, uriPort = (':' : port)}
-        , uriPath = ('/' : swissnum)
-        , uriFragment = "" -- It's a fURL, not a NURL, so there's no fragment.
-        } =
-        case readMaybe port of
-            Nothing -> pure . Left . PortParseError $ port
-            Just realPort -> do
-                manager <- liftIO $ newGBSManager tubid swissnum
-
-                pure . Right $ mkImmutableWrapper manager host realPort
-makeImmutableServer _ = pure . Left $ AnnouncementStructureUnmatched
+announcementToImmutableStorageServer = announcementToStorageServer getImmutableShareNumbers readImmutableShare
diff --git a/src/Tahoe/Download/Internal/Mutable.hs b/src/Tahoe/Download/Internal/Mutable.hs
new file mode 100644
index 0000000000000000000000000000000000000000..011e60142a7d9a6db7f75ea2d3bc9f06e0af319a
--- /dev/null
+++ b/src/Tahoe/Download/Internal/Mutable.hs
@@ -0,0 +1,15 @@
+-- | Functionality related to retrieving "mutable" shares (for example, SDMF).
+module Tahoe.Download.Internal.Mutable where
+
+import Control.Monad.IO.Class (MonadIO)
+import Tahoe.Announcement (StorageServerAnnouncement)
+import Tahoe.CHK.Server (StorageServer)
+import Tahoe.Download.Internal.Client (LookupError, announcementToStorageServer)
+import TahoeLAFS.Storage.Client (getMutableShareNumbers, readMutableShares)
+
+{- | Interpret the location in an announcement as a Tahoe-LAFS fURL pointed at a
+ Great Black Swamp server and construct a StorageServer for interacting with
+ mutable shares stored on it.
+-}
+announcementToMutableStorageServer :: MonadIO m => StorageServerAnnouncement -> m (Either LookupError StorageServer)
+announcementToMutableStorageServer = announcementToStorageServer getMutableShareNumbers readMutableShares
diff --git a/test/Generators.hs b/test/Generators.hs
index 3adc7c662bbccce84fecc99c0cb6251bac5eb1fa..7827221f4e4200997a4ff55165e44577bdcb0af1 100644
--- a/test/Generators.hs
+++ b/test/Generators.hs
@@ -1,11 +1,19 @@
 module Generators where
 
+import qualified Crypto.PubKey.RSA as RSA
+import Data.ASN1.BinaryEncoding (DER (DER))
+import Data.ASN1.Encoding (ASN1Decoding (decodeASN1))
+import Data.ASN1.Types (ASN1Object (fromASN1))
+import Data.Bifunctor (Bifunctor (first))
 import Data.ByteString.Base32 (encodeBase32Unpadded)
+import qualified Data.ByteString.Lazy as LB
 import Data.Int (Int64)
 import qualified Data.Text as T
+import Data.X509 (PrivKey (PrivKeyRSA))
 import Hedgehog (MonadGen)
 import qualified Hedgehog.Gen as Gen
 import qualified Hedgehog.Range as Range
+import System.IO.Unsafe (unsafePerformIO)
 import Tahoe.Announcement (Announcements (..), StorageServerAnnouncement (..))
 import Tahoe.CHK.Types (Parameters (..))
 
@@ -37,3 +45,30 @@ genStorageServerAnnouncements =
         <$> Gen.maybe (Gen.text (Range.linear 16 32) Gen.ascii)
         <*> Gen.maybe (Gen.text (Range.linear 16 32) Gen.ascii)
         <*> Gen.maybe (Gen.bytes $ Range.singleton 32)
+
+{- | Build RSA key pairs.
+
+ Because the specific bits of the key pair shouldn't make any difference to
+ application logic, because generating new RSA key pairs is expensive, and
+ because generating them in a way that makes sense in Hedgehog is
+ challenging, this implementation just knows a few RSA key pairs already and
+ gives back one of them.
+-}
+genRSAKeys :: MonadGen m => m RSA.PrivateKey
+genRSAKeys = Gen.element (map rsaKeyPair rsaKeyPairBytes)
+
+-- I'm not sure how to do IO in MonadGen so do the IO up front unsafely (but
+-- hopefully not really unsafely).
+rsaKeyPairBytes :: [LB.ByteString]
+{-# NOINLINE rsaKeyPairBytes #-}
+rsaKeyPairBytes = unsafePerformIO $ mapM (\n -> LB.readFile ("test/data/rsa-privkey-" <> show n <> ".der")) [0 .. 4 :: Int]
+
+rsaKeyPair :: LB.ByteString -> RSA.PrivateKey
+rsaKeyPair bs = do
+    let (Right kp) = do
+            asn1s <- first show (decodeASN1 DER bs)
+            (r, _) <- fromASN1 asn1s
+            case r of
+                PrivKeyRSA pk -> pure pk
+                _ -> error "Expected RSA Private Key"
+    kp
diff --git a/test/Spec.hs b/test/Spec.hs
index 91a9191fd9cb0223b4fb905c9838bf9d612c93df..f9111f66f629bfb092760f0eccc88869082f071a 100644
--- a/test/Spec.hs
+++ b/test/Spec.hs
@@ -3,6 +3,7 @@ module Main where
 import Control.Exception (Exception, throwIO)
 import Control.Monad (replicateM, zipWithM_)
 import Control.Monad.IO.Class (liftIO)
+import Crypto.Cipher.Types (nullIV)
 import Crypto.Classes (buildKey)
 import qualified Data.Binary as Binary
 import qualified Data.ByteString as B
@@ -15,8 +16,8 @@ import qualified Data.Text as T
 import Data.Text.Encoding (encodeUtf8)
 import Data.Word (Word16)
 import qualified Data.Yaml as Yaml
-import Generators (genAnnouncements, genParameters)
-import Hedgehog (MonadGen, diff, forAll, property, tripping)
+import Generators (genAnnouncements, genParameters, genRSAKeys)
+import Hedgehog (MonadGen, annotateShow, diff, forAll, property, tripping)
 import qualified Hedgehog.Gen as Gen
 import qualified Hedgehog.Range as Range
 import System.IO (hSetEncoding, stderr, stdout, utf8)
@@ -41,6 +42,8 @@ import Tahoe.Download (
     LookupServer,
     download,
  )
+import qualified Tahoe.SDMF as SDMF
+import qualified Tahoe.SDMF.Keys as SDMF.Keys
 import Tahoe.Server (memoryStorageServer)
 import Test.Tasty (TestTree, defaultMain, testGroup)
 import Test.Tasty.HUnit (assertEqual, testCase)
@@ -244,7 +247,7 @@ tests =
                 "download should fail with details about unreachable server"
                 (Left (NotEnoughDecodedShares{notEnoughDecodedSharesNeeded = 3, notEnoughDecodedSharesFound = 0}))
                 result
-        , testProperty "success" $
+        , testProperty "chk success" $
             property $ do
                 -- If we can recover enough distinct, decodeable shares from the
                 -- configured servers then we can recover the application data.
@@ -282,7 +285,7 @@ tests =
                 (shares, cap) <- liftIO $ Tahoe.CHK.encode key params ciphertext
 
                 -- Distribute the shares.
-                liftIO $ placeShares cap (Binary.encode <$> shares) perServerShareCount servers 0
+                liftIO $ placeShares (storageIndex . verifier $ cap) (Binary.encode <$> shares) perServerShareCount servers 0
 
                 let serverMap = Map.fromList $ zip (Set.toList serverIDs') servers
                     lookupServer = someServers serverMap
@@ -291,6 +294,50 @@ tests =
                 -- Recover the plaintext from the servers.
                 result <- liftIO $ download serverAnnouncements cap lookupServer
                 diff (Right plaintext) (==) result
+        , testProperty "ssk success" $
+            property $ do
+                -- Like "chk success" above, but for SDMF (a case of SSK).
+                plaintext <- forAll $ BL.fromStrict <$> Gen.bytes (Range.exponential 56 1024)
+                sequenceNumber <- forAll $ Gen.integral (Range.exponential 1 10000)
+                keypair <- SDMF.Keys.KeyPair <$> forAll genRSAKeys
+                Parameters{paramRequiredShares = required, paramTotalShares = total} <- forAll genParameters
+
+                -- Since multiple shares can be placed on a single server, as long
+                -- as we have one server we have a valid case.  Since some shares
+                -- might be placed non-optimally it is also nice to allow for some
+                -- empty servers so allow for that as well.
+                let numServers = Range.linear 1 (fromIntegral total + 1)
+                    serverIDs = Gen.text (Range.singleton 2) Gen.ascii
+                serverIDs' <- forAll $ Gen.set numServers serverIDs
+
+                perServerShareCount <-
+                    forAll $
+                        genListWithSum (length serverIDs') (fromIntegral total)
+
+                -- Make the servers.
+                servers <- liftIO $ replicateM (length serverIDs') memoryStorageServer
+
+                -- Derive the keys, encode the data.
+                let -- Not a very good IV choice in reality but it's okay for
+                    -- this test where confidentiality and key secrecy are not
+                    -- particularly a concern.
+                    iv = SDMF.Keys.SDMF_IV nullIV
+                    ciphertext = SDMF.encrypt keypair iv plaintext
+                annotateShow ciphertext
+                annotateShow iv
+                (shares, writeCap) <- liftIO $ SDMF.encode keypair iv sequenceNumber required total ciphertext
+                let storageIndex = SDMF.Keys.unStorageIndex . SDMF.verifierStorageIndex . SDMF.readerVerifier . SDMF.writerReader $ writeCap
+                    readCap = SDMF.writerReader writeCap
+                -- Distribute the shares.
+                liftIO $ placeShares storageIndex (Binary.encode <$> shares) perServerShareCount servers 0
+
+                let serverMap = Map.fromList $ zip (Set.toList serverIDs') servers
+                    lookupServer = someServers serverMap
+                    serverAnnouncements = Map.fromSet makeAnn serverIDs'
+
+                -- Recover the plaintext from the servers.
+                result <- liftIO $ download serverAnnouncements readCap lookupServer
+                diff (Right plaintext) (==) result
         , testCase "immutable upload/download to using Great Black Swamp" $ do
             pure ()
             -- Consider moving these tests to another module, they're pretty
@@ -327,7 +374,7 @@ tests =
     -- We wish that share numbers were an opaque type instead of a
     -- numeric/integral type.  This is not the place to argue the point
     -- though.
-    placeShares :: Reader -> [BL.ByteString] -> [Int] -> [StorageServer] -> Int -> IO ()
+    placeShares :: B.ByteString -> [BL.ByteString] -> [Int] -> [StorageServer] -> Int -> IO ()
     -- Out of shares, done.
     placeShares _ [] _ _ _ = pure ()
     -- Out of placement info but not out of shares is a programming error.
@@ -335,14 +382,14 @@ tests =
     -- Out of servers but not out of shares is a programming error.
     placeShares _ _ _ [] _ = throwIO RanOutOfServers
     -- Having some of all three means we can make progress.
-    placeShares cap shares (n : ns) (s : ss) sharesSoFar = do
+    placeShares si shares (n : ns) (s : ss) sharesSoFar = do
         -- write the right number of shares to this server
         zipWithM_
-            (\shnum share -> storageServerWrite s (storageIndex . verifier $ cap) shnum 0 share)
+            (\shnum share -> storageServerWrite s si shnum 0 share)
             [fromIntegral sharesSoFar ..]
             (BL.toStrict <$> take n shares)
         -- recurse to write the rest
-        placeShares cap (drop n shares) ns ss (sharesSoFar + n)
+        placeShares si (drop n shares) ns ss (sharesSoFar + n)
 
     -- Make up a distinct (but nonsense) announcement for a given storage
     -- server identifier.
diff --git a/test/data/rsa-privkey-0.der b/test/data/rsa-privkey-0.der
new file mode 100644
index 0000000000000000000000000000000000000000..e3bb393ed5637f17db469248532b44420cfefc8f
Binary files /dev/null and b/test/data/rsa-privkey-0.der differ
diff --git a/test/data/rsa-privkey-1.der b/test/data/rsa-privkey-1.der
new file mode 100644
index 0000000000000000000000000000000000000000..9bffed68a1b2dcae3bb7a666d71540b26fbfd8d8
Binary files /dev/null and b/test/data/rsa-privkey-1.der differ
diff --git a/test/data/rsa-privkey-2.der b/test/data/rsa-privkey-2.der
new file mode 100644
index 0000000000000000000000000000000000000000..df86447a4ead9bde2e5000614e59cd2149ae505e
Binary files /dev/null and b/test/data/rsa-privkey-2.der differ
diff --git a/test/data/rsa-privkey-3.der b/test/data/rsa-privkey-3.der
new file mode 100644
index 0000000000000000000000000000000000000000..b8a87219665ec0f04855d05596603032725e0d36
Binary files /dev/null and b/test/data/rsa-privkey-3.der differ
diff --git a/test/data/rsa-privkey-4.der b/test/data/rsa-privkey-4.der
new file mode 100644
index 0000000000000000000000000000000000000000..c9d0474ce9b4f22a4f32220cad76659ae54b0b98
Binary files /dev/null and b/test/data/rsa-privkey-4.der differ