diff --git a/ChangeLog.md b/ChangeLog.md
index 1f45291edfedfe4b70714f5fdd096014f50cb3f0..bbb7cdeb70f5fc7464cacd87af1f8c3039c1bd19 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,5 +1,12 @@
 # Changelog for tahoe-great-black-swamp
 
+## 0.5.0.0
+
+* The primary definitions of most types related to the ``Backend`` class have moved to ``tahoe-great-black-swamp-types``.
+  They are still re-exported from their original location for convenience but the new location should be preferred.
+
+* The Filesystem and Memory backends now support operations on mutables.
+
 ## 0.4.0.1
 
 * Package metadata improvements.
diff --git a/flake.lock b/flake.lock
index 722a7ec905dc3e4d9d58f515ca459283a3e88231..c36c03710617dbbc00c21e5e1bb0e5446503c6f4 100644
--- a/flake.lock
+++ b/flake.lock
@@ -16,22 +16,6 @@
         "type": "github"
       }
     },
-    "flake-compat_2": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1673956053,
-        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
-        "type": "github"
-      },
-      "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "type": "github"
-      }
-    },
     "flake-utils": {
       "locked": {
         "lastModified": 1676283394,
@@ -78,12 +62,15 @@
       }
     },
     "flake-utils_4": {
+      "inputs": {
+        "systems": "systems"
+      },
       "locked": {
-        "lastModified": 1676283394,
-        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+        "lastModified": 1694529238,
+        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
         "type": "github"
       },
       "original": {
@@ -92,32 +79,27 @@
         "type": "github"
       }
     },
-    "gitignore": {
+    "flake-utils_5": {
       "inputs": {
-        "nixpkgs": [
-          "hs-flake-utils",
-          "pre-commit-hooks",
-          "nixpkgs"
-        ]
+        "systems": "systems_2"
       },
       "locked": {
-        "lastModified": 1660459072,
-        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
-        "owner": "hercules-ci",
-        "repo": "gitignore.nix",
-        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+        "lastModified": 1694529238,
+        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
         "type": "github"
       },
       "original": {
-        "owner": "hercules-ci",
-        "repo": "gitignore.nix",
+        "owner": "numtide",
+        "repo": "flake-utils",
         "type": "github"
       }
     },
-    "gitignore_2": {
+    "gitignore": {
       "inputs": {
         "nixpkgs": [
-          "tahoe-chk",
           "hs-flake-utils",
           "pre-commit-hooks",
           "nixpkgs"
@@ -158,27 +140,6 @@
         "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
       }
     },
-    "hs-flake-utils_2": {
-      "inputs": {
-        "flake-utils": "flake-utils_4",
-        "nixpkgs": "nixpkgs_2",
-        "pre-commit-hooks": "pre-commit-hooks_2"
-      },
-      "locked": {
-        "lastModified": 1677773826,
-        "narHash": "sha256-xJmOtHugr4k2zNhP/AF6JdIUnIEyM+TEspLn2n5kloc=",
-        "ref": "main",
-        "rev": "d3a83fdd9563546ca41771186427638e685a2e2b",
-        "revCount": 9,
-        "type": "git",
-        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
-      },
-      "original": {
-        "ref": "main",
-        "type": "git",
-        "url": "https://whetstone.private.storage/jcalderone/hs-flake-utils.git"
-      }
-    },
     "nixpkgs": {
       "locked": {
         "lastModified": 1677624842,
@@ -211,38 +172,6 @@
         "type": "github"
       }
     },
-    "nixpkgs-stable_2": {
-      "locked": {
-        "lastModified": 1673800717,
-        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixos-22.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
-      "locked": {
-        "lastModified": 1677624842,
-        "narHash": "sha256-4DF9DbDuK4/+KYx0L6XcPBeDHUFVCtzok2fWtwXtb5w=",
-        "owner": "nixos",
-        "repo": "nixpkgs",
-        "rev": "d70f5cd5c3bef45f7f52698f39e7cc7a89daa7f0",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nixos",
-        "ref": "nixos-22.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
     "pre-commit-hooks": {
       "inputs": {
         "flake-compat": "flake-compat",
@@ -271,69 +200,128 @@
         "type": "github"
       }
     },
-    "pre-commit-hooks_2": {
+    "root": {
       "inputs": {
-        "flake-compat": "flake-compat_2",
-        "flake-utils": [
-          "tahoe-chk",
-          "hs-flake-utils",
-          "flake-utils"
-        ],
-        "gitignore": "gitignore_2",
+        "flake-utils": "flake-utils",
+        "hs-flake-utils": "hs-flake-utils",
         "nixpkgs": [
-          "tahoe-chk",
           "hs-flake-utils",
           "nixpkgs"
         ],
-        "nixpkgs-stable": "nixpkgs-stable_2"
+        "tahoe-chk": "tahoe-chk",
+        "tahoe-great-black-swamp-testing": "tahoe-great-black-swamp-testing",
+        "tahoe-great-black-swamp-types": "tahoe-great-black-swamp-types"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
       },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
+    "systems_2": {
       "locked": {
-        "lastModified": 1677722096,
-        "narHash": "sha256-7mjVMvCs9InnrRybBfr5ohqcOz+pyEX8m22C1XsDilg=",
-        "owner": "cachix",
-        "repo": "pre-commit-hooks.nix",
-        "rev": "61a3511668891c68ebd19d40122150b98dc2fe3b",
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
         "type": "github"
       },
       "original": {
-        "owner": "cachix",
-        "repo": "pre-commit-hooks.nix",
+        "owner": "nix-systems",
+        "repo": "default",
         "type": "github"
       }
     },
-    "root": {
+    "tahoe-chk": {
       "inputs": {
-        "flake-utils": "flake-utils",
-        "hs-flake-utils": "hs-flake-utils",
+        "flake-utils": "flake-utils_3",
+        "hs-flake-utils": [
+          "hs-flake-utils"
+        ],
         "nixpkgs": [
           "hs-flake-utils",
           "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1696270283,
+        "narHash": "sha256-0/6VEsjXe7EvYY2BnkWkmHCVzdp1WcFLjx5mvHDMLnM=",
+        "ref": "main",
+        "rev": "57f02ab7d73b831afb0d2c8196bd3b02764bf29c",
+        "revCount": 488,
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk.git"
+      },
+      "original": {
+        "ref": "main",
+        "type": "git",
+        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk.git"
+      }
+    },
+    "tahoe-great-black-swamp-testing": {
+      "inputs": {
+        "flake-utils": "flake-utils_4",
+        "hs-flake-utils": [
+          "hs-flake-utils"
         ],
-        "tahoe-chk": "tahoe-chk"
+        "nixpkgs": [
+          "hs-flake-utils",
+          "nixpkgs"
+        ],
+        "tahoe-great-black-swamp-types": [
+          "tahoe-great-black-swamp-types"
+        ]
+      },
+      "locked": {
+        "lastModified": 1699477067,
+        "narHash": "sha256-PwyrDMZXAhLb94XtRx2MIOz/77Zm5I7P8ag23WAFRPE=",
+        "ref": "main",
+        "rev": "1b34f87d90e3dbc61401ea46e4f02303c35aaf6d",
+        "revCount": 6,
+        "type": "git",
+        "url": "https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-testing"
+      },
+      "original": {
+        "ref": "main",
+        "type": "git",
+        "url": "https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-testing"
       }
     },
-    "tahoe-chk": {
+    "tahoe-great-black-swamp-types": {
       "inputs": {
-        "flake-utils": "flake-utils_3",
-        "hs-flake-utils": "hs-flake-utils_2",
+        "flake-utils": "flake-utils_5",
+        "hs-flake-utils": [
+          "hs-flake-utils"
+        ],
         "nixpkgs": [
           "hs-flake-utils",
           "nixpkgs"
         ]
       },
       "locked": {
-        "lastModified": 1696262854,
-        "narHash": "sha256-0/6VEsjXe7EvYY2BnkWkmHCVzdp1WcFLjx5mvHDMLnM=",
-        "ref": "refs/tags/0.2.0.0",
-        "rev": "42ae52257ec6e6d8eaa9a56662ca5edfbce8074b",
-        "revCount": 487,
+        "lastModified": 1699453849,
+        "narHash": "sha256-7qqW8WwJ3yDrEsO9EfL9GxJo8piV5QeR7/GI9yYngaU=",
+        "ref": "main",
+        "rev": "bb60f23c8660db38a2ff89ec731aaa3a08c9d7ba",
+        "revCount": 10,
         "type": "git",
-        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk"
+        "url": "https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-types"
       },
       "original": {
-        "ref": "refs/tags/0.2.0.0",
+        "ref": "main",
         "type": "git",
-        "url": "https://whetstone.private.storage/PrivateStorage/tahoe-chk"
+        "url": "https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-types"
       }
     }
   },
diff --git a/flake.nix b/flake.nix
index b1126ee4b1666dc7117d829516bd9ef87cb1f41a..792f16bb36fa9eb53a511bf0565bae1d04b8f7d4 100644
--- a/flake.nix
+++ b/flake.nix
@@ -8,8 +8,20 @@
     hs-flake-utils.url = "git+https://whetstone.private.storage/jcalderone/hs-flake-utils.git?ref=main";
 
     tahoe-chk = {
-      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-chk?ref=refs/tags/0.2.0.0";
+      url = "git+https://whetstone.private.storage/PrivateStorage/tahoe-chk.git?ref=main";
       inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+      inputs.hs-flake-utils.follows = "hs-flake-utils";
+    };
+    tahoe-great-black-swamp-types = {
+      url = "git+https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-types?ref=main";
+      inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+      inputs.hs-flake-utils.follows = "hs-flake-utils";
+    };
+    tahoe-great-black-swamp-testing = {
+      url = "git+https://gitlab.com/tahoe-lafs/tahoe-great-black-swamp-testing?ref=main";
+      inputs.nixpkgs.follows = "hs-flake-utils/nixpkgs";
+      inputs.hs-flake-utils.follows = "hs-flake-utils";
+      inputs.tahoe-great-black-swamp-types.follows = "tahoe-great-black-swamp-types";
     };
   };
 
@@ -19,6 +31,8 @@
     flake-utils,
     hs-flake-utils,
     tahoe-chk,
+    tahoe-great-black-swamp-types,
+    tahoe-great-black-swamp-testing,
   }: let
     ulib = flake-utils.lib;
     ghcVersion = "ghc8107";
@@ -43,6 +57,8 @@
         packageName = "haskell-tahoe-lafs-storage-server";
         hsPkgsOverrides = hfinal: hprev: {
           tahoe-chk = tahoe-chk.outputs.packages.${system}.default;
+          tahoe-great-black-swamp-types = tahoe-great-black-swamp-types.outputs.packages.${system}.default;
+          tahoe-great-black-swamp-testing = tahoe-great-black-swamp-testing.outputs.packages.${system}.default;
         };
       };
     in {
@@ -54,36 +70,30 @@
             # library pieces, so make sure pkg-config is around.
             pkg-config
             # And the C library pieces, too.
-            zlib
+            zlib.dev
           ];
       };
       packages = hslib.packages {};
       apps = {
-        generate-cabal-project = {
-          type = "app";
-          program = "${
-            pkgs.writeShellApplication {
-              name = "generate-cabal-project";
-              text = ''
-                cat >cabal.project.local <<EOF
-                -- This file is auto-generated by the flake devShell's shellHook.  Do
-                -- not edit this file.  Make changes in flake.nix.
-                package zlib
-                  -- by default the cabal build won't use pkg-config to find the
-                  -- underlying zlib c library.  this will most likely cause
-                  -- "cabal build" to fail (unless other steps are taken).  turn
-                  -- on pkg-config so that the cabal-test app below can succeed.
-                  flags: -bundled-c-zlib -non-blocking-ffi +pkg-config
-                EOF
-              '';
-            }
-          }/bin/generate-cabal-project";
+        write-cabal-project = hslib.apps.write-cabal-project {
+          localPackages = {
+            tahoe-chk = tahoe-chk.sourceInfo.outPath;
+            tahoe-great-black-swamp-types = tahoe-great-black-swamp-types.sourceInfo.outPath;
+            tahoe-great-black-swamp-testing = tahoe-great-black-swamp-testing.sourceInfo.outPath;
+          };
         };
         cabal-test = hslib.apps.cabal-test {
           preBuild = ''
             # Refresh the cabal.project.local file to point to the correct
             # dependencies, if necessary.
-            nix run .#generate-cabal-project
+            nix run .#write-cabal-project
+
+            # Also, zlib doesn't want to use pkg-config by default.  Convince
+            # it...
+            cat >>cabal.project.local <<EOF
+            package zlib
+                flags: +pkg-config
+            EOF
 
             # Here we make zlib discoverable by pkg-config so cabal can find
             # headers and stuff.
diff --git a/src/TahoeLAFS/Storage/API.hs b/src/TahoeLAFS/Storage/API.hs
index b4b5bd0b35ac40816fd5e4cb117b972eb5fdc3c3..5b0a38bf75c63e7d8388976d103db7ddaa50fd46 100644
--- a/src/TahoeLAFS/Storage/API.hs
+++ b/src/TahoeLAFS/Storage/API.hs
@@ -1,15 +1,11 @@
 {-# LANGUAGE DataKinds #-}
--- https://artyom.me/aeson#records-and-json-generics
-{-# LANGUAGE DeriveAnyClass #-}
--- https://artyom.me/aeson#records-and-json-generics
-{-# LANGUAGE DeriveGeneric #-}
 {-# LANGUAGE DerivingStrategies #-}
 {-# LANGUAGE FlexibleInstances #-}
--- Supports derivations for ShareNumber
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
 {-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE PackageImports #-}
 {-# LANGUAGE TypeOperators #-}
+-- TODO Move the orphans
+{-# OPTIONS_GHC -fno-warn-orphans #-}
 
 module TahoeLAFS.Storage.API (
     Version (..),
@@ -59,7 +55,6 @@ import qualified Codec.Serialise.Encoding as CSE
 import Control.Monad
 import Data.Aeson (
     FromJSON (..),
-    FromJSONKey (..),
     ToJSON (..),
     ToJSONKey (..),
     camelTo2,
@@ -73,11 +68,9 @@ import Data.Aeson.Types (
     toJSONKeyText,
  )
 import Data.Bifunctor (Bifunctor (bimap))
-import Data.ByteArray (constEq)
 import qualified Data.ByteString as B
 import qualified "base64-bytestring" Data.ByteString.Base64 as Base64
 import qualified Data.Map as Map
-import Data.Map.Merge.Strict (merge, preserveMissing, zipWithMatched)
 import Data.Map.Strict (
     Map,
  )
@@ -86,9 +79,6 @@ import qualified Data.Text as T
 import Data.Text.Encoding (
     decodeUtf8',
  )
-import GHC.Generics (
-    Generic,
- )
 import Network.HTTP.Types (
     ByteRanges,
     parseByteRanges,
@@ -110,6 +100,35 @@ import Servant (
     (:<|>),
     (:>),
  )
+import Tahoe.Storage.Backend (
+    AllocateBuckets (..),
+    AllocationResult (..),
+    ApplicationVersion,
+    CBORSet (..),
+    CorruptionDetails (..),
+    LeaseSecret (..),
+    Offset,
+    QueryRange,
+    ReadResult,
+    ReadTestWriteResult (..),
+    ReadTestWriteVectors (..),
+    ReadVector (..),
+    ShareData,
+    ShareNumber (..),
+    Size,
+    StorageIndex,
+    TestOperator (..),
+    TestVector (..),
+    TestWriteVectors (..),
+    UploadSecret (..),
+    Version (..),
+    Version1Parameters (..),
+    WriteEnablerSecret (..),
+    WriteVector (..),
+    readv,
+    testv,
+    writev,
+ )
 import TahoeLAFS.Internal.ServantUtil (
     CBOR,
  )
@@ -141,37 +160,6 @@ leaseRenewSecretLength = 32
 leaseCancelSecretLength :: Num a => a
 leaseCancelSecretLength = 32
 
-type ApplicationVersion = B.ByteString
-type Size = Integer
-type Offset = Integer
-type QueryRange = Maybe ByteRanges
-
--- TODO These should probably all be byte strings instead.
-type StorageIndex = String
-type ShareData = B.ByteString
-
-newtype ShareNumber = ShareNumber Integer
-    deriving
-        ( Show
-        , Eq
-        , Ord
-        , Generic
-        )
-    deriving newtype
-        ( ToJSON
-        , FromJSON
-        , FromJSONKey
-        )
-
-{- | A new type for which we can define our own CBOR serialisation rules.  The
- cborg library provides a Serialise instance for Set which is not compatible
- with the representation required by Tahoe-LAFS.
--}
-newtype CBORSet a = CBORSet
-    { getCBORSet :: Set.Set a
-    }
-    deriving newtype (ToJSON, FromJSON, Show, Eq)
-
 -- | Encode a CBORSet using a CBOR "set" tag and a determinate length list.
 encodeCBORSet :: (Serialise a) => CBORSet a -> CSE.Encoding
 encodeCBORSet (CBORSet theSet) =
@@ -237,13 +225,6 @@ shareNumber n =
 toInteger :: ShareNumber -> Integer
 toInteger (ShareNumber i) = i
 
-data Version1Parameters = Version1Parameters
-    { maximumImmutableShareSize :: Size
-    , maximumMutableShareSize :: Size
-    , availableSpace :: Size
-    }
-    deriving (Show, Eq, Generic)
-
 encodeVersion1Parameters :: Version1Parameters -> CSE.Encoding
 encodeVersion1Parameters Version1Parameters{..} =
     CSE.encodeMapLen 3 -- three rings for the elven kings
@@ -296,12 +277,6 @@ instance ToJSON Version1Parameters where
 instance FromJSON Version1Parameters where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data Version = Version
-    { parameters :: Version1Parameters
-    , applicationVersion :: ApplicationVersion
-    }
-    deriving (Show, Eq, Generic)
-
 encodeApplicationVersion :: ApplicationVersion -> CSE.Encoding
 encodeApplicationVersion = CSE.encodeBytes
 
@@ -353,12 +328,6 @@ instance ToJSON Version where
 instance FromJSON Version where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data AllocateBuckets = AllocateBuckets
-    { shareNumbers :: [ShareNumber]
-    , allocatedSize :: Size
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise AllocateBuckets
 
@@ -368,12 +337,6 @@ instance ToJSON AllocateBuckets where
 instance FromJSON AllocateBuckets where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data AllocationResult = AllocationResult
-    { alreadyHave :: [ShareNumber]
-    , allocated :: [ShareNumber]
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise AllocationResult
 
@@ -383,11 +346,6 @@ instance ToJSON AllocationResult where
 instance FromJSON AllocationResult where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-newtype CorruptionDetails = CorruptionDetails
-    { reason :: String
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise CorruptionDetails
 
@@ -412,14 +370,6 @@ instance ToHttpApiData ByteRanges where
     toUrlPiece _ = error "Cannot serialize ByteRanges to URL piece"
     toQueryParam _ = error "Cannot serialize ByteRanges to query params"
 
-newtype UploadSecret = UploadSecret B.ByteString
-newtype WriteEnablerSecret = WriteEnablerSecret B.ByteString
-
-instance Eq WriteEnablerSecret where
-    (WriteEnablerSecret left) == (WriteEnablerSecret right) = constEq left right
-
-data LeaseSecret = Renew B.ByteString | Cancel B.ByteString | Upload UploadSecret | Write WriteEnablerSecret
-
 isUploadSecret :: LeaseSecret -> Bool
 isUploadSecret (Upload _) = True
 isUploadSecret _ = False
@@ -528,14 +478,6 @@ type StorageAPI =
                 :<|> "mutable" :> AdviseCorrupt
            )
 
-type ReadResult = Map ShareNumber [ShareData]
-
-data ReadTestWriteResult = ReadTestWriteResult
-    { success :: Bool
-    , readData :: ReadResult
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise ReadTestWriteResult
 
@@ -545,12 +487,6 @@ instance ToJSON ReadTestWriteResult where
 instance FromJSON ReadTestWriteResult where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data ReadTestWriteVectors = ReadTestWriteVectors
-    { testWriteVectors :: Map ShareNumber TestWriteVectors
-    , readVector :: [ReadVector]
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise ReadTestWriteVectors
 
@@ -560,12 +496,6 @@ instance ToJSON ReadTestWriteVectors where
 instance FromJSON ReadTestWriteVectors where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data ReadVector = ReadVector
-    { offset :: Offset
-    , readSize :: Size
-    }
-    deriving (Show, Eq, Generic)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise ReadVector
 
@@ -575,72 +505,15 @@ instance ToJSON ReadVector where
 instance FromJSON ReadVector where
     parseJSON = genericParseJSON tahoeJSONOptions
 
-data TestWriteVectors = TestWriteVectors
-    { test :: [TestVector]
-    , write :: [WriteVector]
-    , newLength :: Maybe Integer
-    }
-    deriving (Show, Eq, Generic, ToJSON, FromJSON)
-
-instance Semigroup TestWriteVectors where
-    (TestWriteVectors testL writeL _) <> (TestWriteVectors testR writeR newLengthR) =
-        TestWriteVectors (testL <> testR) (writeL <> writeR) newLengthR
-
-instance Monoid TestWriteVectors where
-    mempty = TestWriteVectors mempty mempty Nothing
-
-instance Monoid ReadTestWriteVectors where
-    mempty = ReadTestWriteVectors mempty []
-
-instance Semigroup ReadTestWriteVectors where
-    (ReadTestWriteVectors wv0 rv0) <> (ReadTestWriteVectors wv1 rv1) =
-        ReadTestWriteVectors (merge preserveMissing preserveMissing (zipWithMatched $ \_ l r -> l <> r) wv0 wv1) (rv0 <> rv1)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise TestWriteVectors
 
-readv :: Offset -> Size -> ReadTestWriteVectors
-readv offset size = mempty{readVector = [ReadVector offset size]}
-
-writev :: ShareNumber -> Offset -> ShareData -> ReadTestWriteVectors
-writev shareNum offset bytes = mempty{testWriteVectors = Map.singleton shareNum (mempty{write = [WriteVector offset bytes]})}
-
-testv :: ShareNumber -> Offset -> ShareData -> ReadTestWriteVectors
-testv shareNum offset specimen =
-    mempty
-        { testWriteVectors = Map.singleton shareNum (mempty{test = [TestVector offset (fromIntegral $ B.length specimen) Eq specimen]})
-        }
-
--- XXX Most of these operators have been removed from the spec.
-data TestOperator
-    = Lt
-    | Le
-    | Eq
-    | Ne
-    | Ge
-    | Gt
-    deriving (Show, Eq, Generic, ToJSON, FromJSON)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise TestOperator
 
-data TestVector = TestVector
-    { testOffset :: Offset
-    , testSize :: Size
-    , operator :: TestOperator
-    , specimen :: ShareData
-    }
-    deriving (Show, Eq, Generic, ToJSON, FromJSON)
-
 -- XXX This derived instance is surely not compatible with Tahoe-LAFS.
 instance Serialise TestVector
 
-data WriteVector = WriteVector
-    { writeOffset :: Offset
-    , shareData :: ShareData
-    }
-    deriving (Show, Eq, Generic, ToJSON, FromJSON)
-
 instance Serialise WriteVector
 
 api :: Proxy StorageAPI
diff --git a/src/TahoeLAFS/Storage/Backend.hs b/src/TahoeLAFS/Storage/Backend.hs
index 5d772befe6ff8222a7668ae25212a8a2801f75ba..efbf1878084ba787601e948c778d1db4e84ca221 100644
--- a/src/TahoeLAFS/Storage/Backend.hs
+++ b/src/TahoeLAFS/Storage/Backend.hs
@@ -5,6 +5,7 @@ module TahoeLAFS.Storage.Backend (
     WriteImmutableError (..),
     writeMutableShare,
     withUploadSecret,
+    module Tahoe.Storage.Backend,
 ) where
 
 import Control.Exception (
@@ -12,76 +13,17 @@ import Control.Exception (
     throw,
     throwIO,
  )
-
 import Data.Map.Strict (
     fromList,
  )
-
 import Network.HTTP.Types (
     ByteRanges,
  )
+import Tahoe.Storage.Backend
 import TahoeLAFS.Storage.API (
-    AllocateBuckets,
-    AllocationResult,
-    CBORSet (..),
-    CorruptionDetails,
-    LeaseSecret (..),
-    QueryRange,
-    ReadTestWriteResult (..),
-    ReadTestWriteVectors (..),
-    ShareData,
-    ShareNumber,
-    StorageIndex,
-    TestWriteVectors (..),
-    UploadSecret (..),
-    Version,
-    WriteEnablerSecret,
-    WriteVector (..),
     isUploadSecret,
  )
 
-data WriteImmutableError
-    = MissingUploadSecret
-    | ShareSizeMismatch
-    | ImmutableShareAlreadyWritten
-    | ShareNotAllocated
-    | IncorrectUploadSecret
-    | IncorrectWriteEnablerSecret
-    deriving (Ord, Eq, Show)
-instance Exception WriteImmutableError
-
-class Backend b where
-    version :: b -> IO Version
-
-    -- | Update the lease expiration time on the shares associated with the
-    -- given storage index.
-    renewLease :: b -> StorageIndex -> [LeaseSecret] -> IO ()
-
-    createImmutableStorageIndex :: b -> StorageIndex -> Maybe [LeaseSecret] -> AllocateBuckets -> IO AllocationResult
-
-    -- May throw ImmutableShareAlreadyWritten
-    writeImmutableShare :: b -> StorageIndex -> ShareNumber -> Maybe [LeaseSecret] -> ShareData -> Maybe ByteRanges -> IO ()
-    abortImmutableUpload :: b -> StorageIndex -> ShareNumber -> Maybe [LeaseSecret] -> IO ()
-    adviseCorruptImmutableShare :: b -> StorageIndex -> ShareNumber -> CorruptionDetails -> IO ()
-    getImmutableShareNumbers :: b -> StorageIndex -> IO (CBORSet ShareNumber)
-    readImmutableShare :: b -> StorageIndex -> ShareNumber -> QueryRange -> IO ShareData
-
-    -- | Read some ranges of all shares held and/or, if test conditions are
-    -- met, overwrite some ranges of some shares.
-    readvAndTestvAndWritev ::
-        b ->
-        -- | The storage index at which to operate.
-        StorageIndex ->
-        -- | A shared secret which the backend can use to authorize the writes.
-        WriteEnablerSecret ->
-        -- | The reads, tests, and writes to perform.
-        ReadTestWriteVectors ->
-        IO ReadTestWriteResult
-
-    readMutableShare :: b -> StorageIndex -> ShareNumber -> QueryRange -> IO ShareData
-    getMutableShareNumbers :: b -> StorageIndex -> IO (CBORSet ShareNumber)
-    adviseCorruptMutableShare :: b -> StorageIndex -> ShareNumber -> CorruptionDetails -> IO ()
-
 writeMutableShare ::
     Backend b =>
     b ->
diff --git a/src/TahoeLAFS/Storage/Backend/Filesystem.hs b/src/TahoeLAFS/Storage/Backend/Filesystem.hs
index 2459c86ceaf8ce3f451163a6421bab43d400203e..039d7ed4d71845ae2aadee84da166d0e7b0d4646 100644
--- a/src/TahoeLAFS/Storage/Backend/Filesystem.hs
+++ b/src/TahoeLAFS/Storage/Backend/Filesystem.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE LambdaCase #-}
 {-# LANGUAGE NumericUnderscores #-}
 {-# LANGUAGE OverloadedStrings #-}
 
@@ -30,7 +31,7 @@ import Data.Maybe (
     mapMaybe,
  )
 import qualified Data.Set as Set
-import Data.Tuple.Extra ((&&&))
+import Network.HTTP.Types (ByteRange (..))
 import System.Directory (
     createDirectoryIfMissing,
     doesPathExist,
@@ -45,11 +46,17 @@ import System.FilePath (
 import System.IO (
     Handle,
     IOMode (ReadMode, ReadWriteMode),
-    SeekMode (AbsoluteSeek),
+    SeekMode (..),
+    hFileSize,
     hSeek,
     withBinaryFile,
  )
 import System.IO.Error (isDoesNotExistError)
+import Tahoe.Storage.Backend (
+    Backend (..),
+    WriteImmutableError (..),
+    WriteMutableError (..),
+ )
 import TahoeLAFS.Storage.API (
     AllocateBuckets (..),
     AllocationResult (..),
@@ -58,7 +65,6 @@ import TahoeLAFS.Storage.API (
     ReadResult,
     ReadTestWriteResult (ReadTestWriteResult, readData, success),
     ReadTestWriteVectors (ReadTestWriteVectors),
-    ReadVector (..),
     ShareData,
     ShareNumber,
     Size,
@@ -73,11 +79,8 @@ import TahoeLAFS.Storage.API (
     shareNumber,
  )
 import qualified TahoeLAFS.Storage.API as Storage
-import TahoeLAFS.Storage.Backend (
-    Backend (..),
-    WriteImmutableError (ImmutableShareAlreadyWritten, IncorrectUploadSecret, IncorrectWriteEnablerSecret),
-    withUploadSecret,
- )
+import TahoeLAFS.Storage.Backend (withUploadSecret)
+import TahoeLAFS.Storage.Backend.Util (readvToByteRange)
 import Prelude hiding (
     readFile,
     writeFile,
@@ -93,6 +96,12 @@ versionString = "tahoe-lafs (gbs) 0.1.0"
 maxMutableShareSize :: Storage.Size
 maxMutableShareSize = 69_105 * 1_000 * 1_000 * 1_000 * 1_000
 
+-- Perhaps should be based on the underlying filesystem?  Currently a single
+-- file is used per share so we cannot hold a share larger than the
+-- filesystem's largest supported file.
+maximumShareSize :: Integral i => i
+maximumShareSize = fromIntegral (maxBound :: Int)
+
 --  storage/
 --  storage/shares/incoming
 --    incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
@@ -115,7 +124,7 @@ instance Backend FilesystemBackend where
                 { applicationVersion = versionString
                 , parameters =
                     Version1Parameters
-                        { maximumImmutableShareSize = available
+                        { maximumImmutableShareSize = maximumShareSize
                         , maximumMutableShareSize = maxMutableShareSize
                         , -- TODO: Copy the "reserved space" feature of the Python
                           -- implementation.
@@ -127,7 +136,7 @@ instance Backend FilesystemBackend where
         withUploadSecret secrets $ \uploadSecret -> do
             let exists = haveShare backend storageIndex
             (alreadyHave, allocated) <- partitionM exists (shareNumbers params)
-            mapM_ (flip (allocate backend storageIndex) uploadSecret) allocated
+            mapM_ (flip (allocate backend storageIndex (allocatedSize params)) uploadSecret) allocated
             return
                 AllocationResult
                     { alreadyHave = alreadyHave
@@ -146,18 +155,25 @@ instance Backend FilesystemBackend where
                 else do
                     let finalSharePath = pathOfShare root storageIndex shareNumber'
                     let incomingSharePath = incomingPathOf root storageIndex shareNumber'
-                    checkUploadSecret incomingSharePath uploadSecret
-                    writeFile incomingSharePath shareData
-                    let createParents = True
-                    createDirectoryIfMissing createParents $ takeDirectory finalSharePath
-                    removeFile (secretPath incomingSharePath)
-                    renameFile incomingSharePath finalSharePath
+
+                    secretCheck <- try $ checkUploadSecret incomingSharePath uploadSecret
+                    case secretCheck of
+                        Left e
+                            | isDoesNotExistError e -> throwIO ShareNotAllocated
+                            | otherwise -> throwIO e
+                        Right () -> do
+                            writeFile incomingSharePath shareData
+                            let createParents = True
+                            createDirectoryIfMissing createParents $ takeDirectory finalSharePath
+                            removeFileIfExists (secretPath incomingSharePath)
+                            renameFile incomingSharePath finalSharePath
 
     abortImmutableUpload (FilesystemBackend root) storageIndex shareNumber' secrets =
         withUploadSecret secrets $ \uploadSecret -> do
             let incomingSharePath = incomingPathOf root storageIndex shareNumber'
             checkUploadSecret incomingSharePath uploadSecret
-            removeFile incomingSharePath
+            removeFileIfExists incomingSharePath
+            removeFileIfExists (secretPath incomingSharePath)
 
     getImmutableShareNumbers (FilesystemBackend root) storageIndex = do
         let storageIndexPath = pathOfStorageIndex root storageIndex
@@ -178,6 +194,11 @@ instance Backend FilesystemBackend where
 
     getMutableShareNumbers = getImmutableShareNumbers
 
+    readMutableShare backend storageIndex shareNum Nothing =
+        readMutableShare backend storageIndex shareNum (Just [ByteRangeFrom 0])
+    readMutableShare (FilesystemBackend root) storageIndex shareNum (Just ranges) =
+        B.concat <$> mapM (readMutableShare' root storageIndex shareNum) ranges
+
     readvAndTestvAndWritev
         backend@(FilesystemBackend root)
         storageIndex
@@ -200,21 +221,14 @@ instance Backend FilesystemBackend where
                 Map.fromList . zip allShareNumbers' <$> mapM readvOneShare allShareNumbers'
 
             readvOneShare :: ShareNumber -> IO [ShareData]
-            readvOneShare shareNum =
-                mapM (uncurry (readShare shareNum) . (offset &&& readSize)) readv
+            readvOneShare shareNum = mapM (readMutableShare' root storageIndex shareNum) . fmap readvToByteRange $ readv
 
             checkTestVectors :: ShareNumber -> [TestVector] -> IO [Bool]
             checkTestVectors = mapM . checkTestVector
 
             checkTestVector :: ShareNumber -> TestVector -> IO Bool
-            checkTestVector shareNum TestVector{..} = (specimen ==) <$> readShare shareNum testOffset testSize
-
-            readShare :: ShareNumber -> Offset -> Size -> IO ShareData
-            readShare shareNum offset size = withBinaryFile path ReadMode $ \shareFile -> do
-                hSeek shareFile AbsoluteSeek offset
-                B.hGetSome shareFile (fromIntegral size)
-              where
-                path = pathOfShare root storageIndex shareNum
+            checkTestVector shareNum TestVector{..} =
+                (specimen ==) <$> readMutableShare' root storageIndex shareNum (ByteRangeFromTo testOffset (testOffset + testSize - 1))
 
             applyWriteVectors ::
                 (ShareNumber, TestWriteVectors) ->
@@ -276,18 +290,26 @@ storageStartSegment (a : b : _) = [a, b]
 allocate ::
     FilesystemBackend ->
     StorageIndex ->
+    Size ->
     ShareNumber ->
     UploadSecret ->
     IO ()
-allocate (FilesystemBackend root) storageIndex shareNum (UploadSecret secret) =
-    let sharePath = incomingPathOf root storageIndex shareNum
-        shareDirectory = takeDirectory sharePath
-        createParents = True
-     in do
-            createDirectoryIfMissing createParents shareDirectory
-            writeFile (secretPath sharePath) secret
-            writeFile sharePath ""
-            return ()
+allocate (FilesystemBackend root) storageIndex size shareNum (UploadSecret secret)
+    | size > maximumShareSize =
+        throwIO
+            MaximumShareSizeExceeded
+                { maximumShareSizeExceededLimit = maximumShareSize
+                , maximumShareSizeExceededGiven = size
+                }
+    | otherwise =
+        let sharePath = incomingPathOf root storageIndex shareNum
+            shareDirectory = takeDirectory sharePath
+            createParents = True
+         in do
+                createDirectoryIfMissing createParents shareDirectory
+                writeFile (secretPath sharePath) secret
+                writeFile sharePath ""
+                return ()
 
 {- | Given the path of an immutable share, construct a path to use to hold the
  upload secret for that share.
@@ -329,3 +351,28 @@ checkWriteEnabler (WriteEnablerSecret given) storageIndexPath = do
             | otherwise -> throwIO IncorrectWriteEnablerSecret
   where
     path = secretPath storageIndexPath
+
+removeFileIfExists :: FilePath -> IO ()
+removeFileIfExists p =
+    try (removeFile p) >>= \case
+        Left e
+            | isDoesNotExistError e -> pure ()
+            | otherwise -> throwIO e
+        Right () -> pure ()
+
+readMutableShare' :: FilePath -> StorageIndex -> ShareNumber -> ByteRange -> IO ShareData
+readMutableShare' root storageIndex shareNum range =
+    withBinaryFile path ReadMode (readTheRange range)
+  where
+    path = pathOfShare root storageIndex shareNum
+
+    readTheRange (ByteRangeFrom start) shareFile = do
+        hSeek shareFile AbsoluteSeek start
+        B.hGetContents shareFile
+    readTheRange (ByteRangeFromTo start end) shareFile = do
+        hSeek shareFile AbsoluteSeek start
+        B.hGetSome shareFile (fromIntegral $ end - start + 1)
+    readTheRange (ByteRangeSuffix len) shareFile = do
+        realSize <- hFileSize shareFile
+        hSeek shareFile SeekFromEnd (-(min realSize len))
+        B.hGetContents shareFile
diff --git a/src/TahoeLAFS/Storage/Backend/Memory.hs b/src/TahoeLAFS/Storage/Backend/Memory.hs
index b10ee5413361df315fe5dece9d5f44247f34ee8d..57e1a45e5bef23969025829467e8aaa5d54baeb9 100644
--- a/src/TahoeLAFS/Storage/Backend/Memory.hs
+++ b/src/TahoeLAFS/Storage/Backend/Memory.hs
@@ -1,12 +1,6 @@
 {-# LANGUAGE FlexibleInstances #-}
 
-module TahoeLAFS.Storage.Backend.Memory (
-    MemoryBackend (MemoryBackend),
-    memoryBackend,
-    MutableShareSize (..),
-    shareDataSize,
-    toMutableShareSize,
-) where
+module TahoeLAFS.Storage.Backend.Memory where
 
 import Control.Exception (
     throw,
@@ -15,22 +9,22 @@ import Control.Exception (
 import Control.Foldl.ByteString (Word8)
 import Data.ByteArray (constEq)
 import qualified Data.ByteString as B
+import Data.Composition ((.:))
 import Data.IORef (
     IORef,
     atomicModifyIORef',
-    modifyIORef,
     newIORef,
     readIORef,
  )
 import Data.Map.Merge.Strict (merge, preserveMissing, zipWithMatched)
 import qualified Data.Map.Strict as Map
 import Data.Maybe (fromMaybe, isNothing)
-import Data.Monoid (Last (Last, getLast))
+import Data.Monoid (All (All, getAll), First (First, getFirst))
 import qualified Data.Set as Set
-import Network.HTTP.Types (ByteRange (ByteRangeFrom, ByteRangeFromTo, ByteRangeSuffix))
-import TahoeLAFS.Storage.API (
+import Tahoe.Storage.Backend (
     AllocateBuckets (AllocateBuckets),
     AllocationResult (..),
+    Backend (..),
     CBORSet (..),
     Offset,
     QueryRange,
@@ -41,18 +35,20 @@ import TahoeLAFS.Storage.API (
     ShareNumber,
     Size,
     StorageIndex,
+    TestVector (..),
     TestWriteVectors (..),
     UploadSecret (UploadSecret),
     Version (..),
     Version1Parameters (..),
-    WriteEnablerSecret (WriteEnablerSecret),
+    WriteEnablerSecret,
+    WriteImmutableError (..),
+    WriteMutableError (..),
     WriteVector (..),
  )
 import TahoeLAFS.Storage.Backend (
-    Backend (..),
-    WriteImmutableError (ImmutableShareAlreadyWritten, IncorrectUploadSecret, IncorrectWriteEnablerSecret, ShareNotAllocated, ShareSizeMismatch),
     withUploadSecret,
  )
+import TahoeLAFS.Storage.Backend.Util (queryRangeToReadVector, readvToQueryRange)
 import Prelude hiding (
     lookup,
     map,
@@ -65,7 +61,10 @@ data Bucket = Bucket
     , bucketShares :: Map.Map ShareNumber ImmutableShare
     }
 
-data SecretProtected a = SecretProtected WriteEnablerSecret a
+data SecretProtected a = SecretProtected WriteEnablerSecret a deriving (Eq)
+
+instance Show a => Show (SecretProtected a) where
+    show (SecretProtected _ a) = "SecretProtected " <> show a
 
 readSecret :: SecretProtected a -> WriteEnablerSecret
 readSecret (SecretProtected s _) = s
@@ -139,6 +138,12 @@ allocate ::
     (MemoryBackend, AllocationResult)
 allocate storageIndex shareNumbers uploadSecret size backend@MemoryBackend{memoryBackendBuckets}
     | maybe size bucketSize existing /= size = throw ShareSizeMismatch
+    | size > maximumShareSize =
+        throw
+            MaximumShareSizeExceeded
+                { maximumShareSizeExceededLimit = maximumShareSize
+                , maximumShareSizeExceededGiven = size
+                }
     | otherwise =
         ( backend{memoryBackendBuckets = updated}
         , result
@@ -206,18 +211,24 @@ writeImm storageIndex shareNum (UploadSecret uploadSecret) newData b@MemoryBacke
 instance Show MemoryBackend where
     show _ = "<MemoryBackend>"
 
+maximumShareSize :: Integral i => i
+maximumShareSize = fromIntegral (maxBound :: Int)
+
+makeVersionParams :: Integer -> Version1Parameters
+makeVersionParams totalSize =
+    Version1Parameters
+        { maximumImmutableShareSize = maximumShareSize
+        , maximumMutableShareSize = maximumShareSize
+        , availableSpace = (1024 * 1024 * 1024) - totalSize
+        }
+
 instance Backend (IORef MemoryBackend) where
     version backend = do
         totalSize <- readIORef backend >>= totalShareSize
         return
             Version
                 { applicationVersion = "(memory)"
-                , parameters =
-                    Version1Parameters
-                        { maximumImmutableShareSize = 1024 * 1024 * 64
-                        , maximumMutableShareSize = 1024 * 1024 * 64
-                        , availableSpace = (1024 * 1024 * 1024) - totalSize
-                        }
+                , parameters = makeVersionParams totalSize
                 }
 
     getMutableShareNumbers :: IORef MemoryBackend -> StorageIndex -> IO (CBORSet ShareNumber)
@@ -230,48 +241,49 @@ instance Backend (IORef MemoryBackend) where
             $ sharemap
 
     readvAndTestvAndWritev :: IORef MemoryBackend -> StorageIndex -> WriteEnablerSecret -> ReadTestWriteVectors -> IO ReadTestWriteResult
-    readvAndTestvAndWritev
-        backend
-        storageIndex
-        secret
-        (ReadTestWriteVectors testWritev readv) = do
-            -- TODO implement testv parts.
-
-            (CBORSet allShareNums) <- getMutableShareNumbers backend storageIndex
-            let queryRange = readvToQueryRange readv
-
-            readData <- mapM (\shareNum -> (shareNum,) <$> readMutableShare' backend storageIndex shareNum queryRange) (Set.toList allShareNums)
-            outcome <- atomicModifyIORef' backend tryWrite
-            case outcome of
-                TestSuccess ->
-                    return
-                        ReadTestWriteResult
-                            { readData = Map.fromList readData
-                            , success = True
-                            }
-                TestFail ->
-                    return
-                        ReadTestWriteResult
-                            { readData = Map.fromList readData
-                            , success = False
-                            }
-                SecretMismatch ->
-                    throwIO IncorrectWriteEnablerSecret
+    readvAndTestvAndWritev backend storageIndex secret (ReadTestWriteVectors testWritev readv) = do
+        (CBORSet allShareNums) <- getMutableShareNumbers backend storageIndex
+        let queryRange = readvToQueryRange readv
+
+        readData <- mapM (\shareNum -> (shareNum,) <$> readMutableShare' backend storageIndex shareNum queryRange) (Set.toList allShareNums)
+
+        outcome <- atomicModifyIORef' backend tryWrite
+        case outcome of
+            TestSuccess ->
+                return
+                    ReadTestWriteResult
+                        { readData = Map.fromList readData
+                        , success = True
+                        }
+            TestFail ->
+                return
+                    ReadTestWriteResult
+                        { readData = Map.fromList readData
+                        , success = False
+                        }
+            SecretMismatch ->
+                throwIO IncorrectWriteEnablerSecret
+      where
+        checkTestVectors :: MutableShareStorage -> Map.Map ShareNumber TestWriteVectors -> Bool
+        checkTestVectors mutableShares = getAll . Map.foldMapWithKey (foldMap2 $ All .: checkTestVector mutableShares) . Map.map test
+
+        checkTestVector :: MutableShareStorage -> ShareNumber -> TestVector -> Bool
+        checkTestVector mutableShares shareNum TestVector{..} =
+            specimen == actual
           where
-            readvToQueryRange :: [ReadVector] -> QueryRange
-            --            readvToQueryRange [] = Nothing
-            readvToQueryRange rv = Just (go rv)
-              where
-                go [] = []
-                go (r : rs) = ByteRangeFromTo off end : go rs
-                  where
-                    off = offset r
-                    end = off + readSize r - 1
-
-            tryWrite m@MemoryBackend{mutableShares} =
+            actual =
+                readMutableShare''
+                    mutableShares
+                    storageIndex
+                    shareNum
+                    ReadVector{offset = testOffset, readSize = fromIntegral $ B.length specimen}
+
+        tryWrite m@MemoryBackend{mutableShares}
+            | checkTestVectors mutableShares testWritev =
                 case addShares storageIndex secret mutableShares (Map.map write testWritev) of
                     Nothing -> (m, SecretMismatch)
                     Just newShares -> (m{mutableShares = newShares}, TestSuccess)
+            | otherwise = (m, TestFail)
 
     readMutableShare backend storageIndex shareNum queryRange =
         B.concat <$> readMutableShare' backend storageIndex shareNum queryRange
@@ -320,7 +332,7 @@ addShare storageIndex secret shareNum writev =
     f :: Map.Map ShareNumber [WriteVector] -> Map.Map ShareNumber [WriteVector] -> Map.Map ShareNumber [WriteVector]
     f = merge preserveMissing preserveMissing (zipWithMatched (const (<>)))
 
-    newShare = SecretProtected secret (Map.singleton shareNum writev)
+    newShare = SecretProtected secret (Map.singleton shareNum (reverse writev))
 
 addShares :: StorageIndex -> WriteEnablerSecret -> MutableShareStorage -> Map.Map ShareNumber [WriteVector] -> Maybe MutableShareStorage
 addShares storageIndex secret existing updates
@@ -339,31 +351,14 @@ memoryBackend = do
 readMutableShare' :: IORef MemoryBackend -> StorageIndex -> ShareNumber -> QueryRange -> IO [ShareData]
 readMutableShare' backend storageIndex shareNum queryRange = do
     storage <- mutableShares <$> readIORef backend
-    pure $ doOneRead <$> rv storage <*> pure storage
-  where
-    rv :: MutableShareStorage -> [ReadVector]
-    rv storage = queryRangeToReadVector storage queryRange
-
-    getShareData storage =
-        Map.lookup storageIndex storage >>= Map.lookup shareNum . readProtected
-
-    doOneRead :: ReadVector -> MutableShareStorage -> ShareData
-    doOneRead readv storage =
-        maybe "" (readOneVector readv) (getShareData storage)
+    let shareSize = maybe 0 shareDataSize (getShareData storage storageIndex shareNum)
+    pure $ readMutableShare'' storage storageIndex shareNum <$> queryRangeToReadVector shareSize queryRange
 
-    queryRangeToReadVector :: MutableShareStorage -> QueryRange -> [ReadVector]
-    queryRangeToReadVector storage Nothing = [ReadVector 0 size]
-      where
-        size = maybe 0 shareDataSize (getShareData storage)
-    queryRangeToReadVector storage (Just ranges) = toReadVector <$> ranges
-      where
-        toReadVector (ByteRangeFrom start) = ReadVector start size
-          where
-            size = maybe 0 shareDataSize (getShareData storage)
-        toReadVector (ByteRangeFromTo start end) = ReadVector start (end - start + 1)
-        toReadVector (ByteRangeSuffix len) = ReadVector (size - len) len
-          where
-            size = maybe 0 shareDataSize (getShareData storage)
+readMutableShare'' :: MutableShareStorage -> StorageIndex -> ShareNumber -> ReadVector -> ShareData
+readMutableShare'' storage storageIndex shareNum rv =
+    maybe "" (readOneVector rv) theShareData
+  where
+    theShareData = getShareData storage storageIndex shareNum
 
     readOneVector :: ReadVector -> [WriteVector] -> ShareData
     readOneVector ReadVector{offset, readSize} wv =
@@ -374,15 +369,19 @@ readMutableShare' backend storageIndex shareNum queryRange = do
         extractBytes :: Integer -> Word8
         extractBytes p = fromMaybe 0 (go wv)
           where
-            -- New writes are added to the end of the list so give the Last
+            -- New writes are added to the front of the list so give the First
             -- write precedence over others.
-            go = getLast . foldMap (Last . byteFromShare p)
+            go = getFirst . foldMap (First . byteFromShare p)
 
         byteFromShare :: Integer -> WriteVector -> Maybe Word8
         byteFromShare p (WriteVector off bytes)
             | p >= off && p < off + fromIntegral (B.length bytes) = Just (B.index bytes (fromIntegral $ p - off))
             | otherwise = Nothing
 
+getShareData :: MutableShareStorage -> StorageIndex -> ShareNumber -> Maybe [WriteVector]
+getShareData storage storageIndex shareNum =
+    Map.lookup storageIndex storage >>= Map.lookup shareNum . readProtected
+
 -- | Internal type tracking the result of an attempted mutable write.
 data WriteResult
     = -- | The test condition succeeded and the write was performed.
@@ -391,3 +390,6 @@ data WriteResult
       TestFail
     | -- | The supplied secret was incorrect and the write was not performed.
       SecretMismatch
+
+foldMap2 :: (Foldable o, Monoid c) => (a -> b -> c) -> (a -> o b -> c)
+foldMap2 f a = foldMap (f a)
diff --git a/src/TahoeLAFS/Storage/Backend/Util.hs b/src/TahoeLAFS/Storage/Backend/Util.hs
new file mode 100644
index 0000000000000000000000000000000000000000..6ec5a8aba3f26a0e0b8d7bd9aadf53dcfde8193e
--- /dev/null
+++ b/src/TahoeLAFS/Storage/Backend/Util.hs
@@ -0,0 +1,27 @@
+module TahoeLAFS.Storage.Backend.Util where
+
+import Network.HTTP.Types (ByteRange (..))
+import Tahoe.Storage.Backend (QueryRange, ReadVector (..), Size)
+
+readvToByteRange :: ReadVector -> ByteRange
+readvToByteRange ReadVector{..} = ByteRangeFromTo offset (offset + readSize - 1)
+
+readvToQueryRange :: [ReadVector] -> QueryRange
+readvToQueryRange = Just . fmap readvToByteRange
+
+queryRangeToReadVector :: Size -> QueryRange -> [ReadVector]
+queryRangeToReadVector shareSize Nothing = [ReadVector 0 shareSize]
+queryRangeToReadVector shareSize (Just ranges) = toReadVector <$> ranges
+  where
+    toReadVector (ByteRangeFrom start) = ReadVector offset size
+      where
+        offset = max 0 start
+        size = shareSize - offset
+    toReadVector (ByteRangeFromTo start end) = ReadVector offset size
+      where
+        offset = min shareSize (max 0 start)
+        size = min (shareSize - offset) (end - start + 1)
+    toReadVector (ByteRangeSuffix len) = ReadVector offset size
+      where
+        offset = max 0 $ shareSize - len
+        size = min (shareSize - offset) len
diff --git a/tahoe-great-black-swamp.cabal b/tahoe-great-black-swamp.cabal
index 7e23b4feaac5cf1722ee612d7f0872df979fe214..dc7a2d31009f77674c2a841631be6c791853db2f 100644
--- a/tahoe-great-black-swamp.cabal
+++ b/tahoe-great-black-swamp.cabal
@@ -100,41 +100,44 @@ library
     TahoeLAFS.Storage.Backend.Filesystem
     TahoeLAFS.Storage.Backend.Memory
     TahoeLAFS.Storage.Backend.Null
+    TahoeLAFS.Storage.Backend.Util
     TahoeLAFS.Storage.Client
     TahoeLAFS.Storage.Server
 
   default-language: Haskell2010
   build-depends:
-    , asn1-encoding       >=0.9.6    && <0.10
-    , asn1-types          >=0.3.4    && <0.4
-    , base                >=4.7      && <5
-    , base64              >=0.2      && <0.5
-    , base64-bytestring   >=1.0.0.3  && <1.3
-    , cborg               >=0.2.4    && <0.3
-    , cborg-json          >=0.2.2    && <0.3
-    , connection          >=0.3.1    && <0.4
-    , cryptonite          >=0.27     && <0.31
-    , data-default-class  >=0.1      && <0.2
-    , http-api-data       >=0.4.1.1  && <0.7
-    , http-client         >=0.6.4.1  && <0.8
-    , http-client-tls     >=0.3.5.3  && <0.4
-    , http-media          >=0.8      && <0.9
-    , memory              >=0.15     && <0.19
-    , network             >=3.1.2    && <3.2
-    , network-uri         >=2.6.3    && <2.7
-    , scientific          >=0.3.6.2  && <0.4
-    , serialise           >=0.2.3    && <0.3
-    , servant-client      >=0.16.0.1 && <0.21
-    , servant-docs        >=0.11.4   && <0.14
-    , servant-server      >=0.16.2   && <0.21
-    , tls                 >=1.5      && <2
-    , utf8-string         >=1.0.1.1  && <1.1
-    , wai                 >=3.2.2.1  && <3.3
-    , warp                >=3.3.13   && <3.4
-    , warp-tls            >=3.2.12   && <3.5
-    , x509                >=1.7      && <1.8
-    , x509-store          >=1.6      && <1.7
-    , x509-validation     >=1.6      && <1.7
+    , asn1-encoding                  >=0.9.6    && <0.10
+    , asn1-types                     >=0.3.4    && <0.4
+    , base                           >=4.7      && <5
+    , base64                         >=0.2      && <0.5
+    , base64-bytestring              >=1.0.0.3  && <1.3
+    , cborg                          >=0.2.4    && <0.3
+    , cborg-json                     >=0.2.2    && <0.3
+    , composition                    >=1.0      && <1.1
+    , connection                     >=0.3.1    && <0.4
+    , cryptonite                     >=0.27     && <0.31
+    , data-default-class             >=0.1      && <0.2
+    , http-api-data                  >=0.4.1.1  && <0.7
+    , http-client                    >=0.6.4.1  && <0.8
+    , http-client-tls                >=0.3.5.3  && <0.4
+    , http-media                     >=0.8      && <0.9
+    , memory                         >=0.15     && <0.19
+    , network                        >=3.1.2    && <3.2
+    , network-uri                    >=2.6.3    && <2.7
+    , scientific                     >=0.3.6.2  && <0.4
+    , serialise                      >=0.2.3    && <0.3
+    , servant-client                 >=0.16.0.1 && <0.21
+    , servant-docs                   >=0.11.4   && <0.14
+    , servant-server                 >=0.16.2   && <0.21
+    , tahoe-great-black-swamp-types  >=0.4      && <0.5
+    , tls                            >=1.5      && <2
+    , utf8-string                    >=1.0.1.1  && <1.1
+    , wai                            >=3.2.2.1  && <3.3
+    , warp                           >=3.3.13   && <3.4
+    , warp-tls                       >=3.2.12   && <3.5
+    , x509                           >=1.7      && <1.8
+    , x509-store                     >=1.6      && <1.7
+    , x509-validation                >=1.6      && <1.7
 
 -- executable gbs-generate-apidocs
 --   hs-source-dirs:      generate-apidocs
@@ -212,32 +215,34 @@ test-suite http-tests
   default-language: Haskell2010
   ghc-options:      -Wall -threaded
   build-depends:
-    , base                     >=4.7      && <5
-    , base32string             >=0.9.1    && <0.10
-    , base64                   >=0.2      && <0.5
-    , cborg                    >=0.2.4    && <0.3
-    , connection               >=0.3      && <0.4
-    , data-default-class       >=0.1      && <0.2
-    , data-interval            >=2.0.1    && <2.2
-    , hspec                    <2.12
-    , hspec-expectations       <0.9
-    , hspec-wai                <0.12
-    , http-client              >=0.6.4.1  && <0.8
-    , network                  >=3.1      && <3.2
-    , network-simple-tls       >=0.4      && <0.5
-    , QuickCheck               <2.15
-    , quickcheck-classes       >=0.6      && <0.7
-    , quickcheck-instances     <0.4
-    , serialise                >=0.2.3    && <0.3
-    , servant                  >=0.16.2   && <0.21
-    , servant-client           >=0.16.0.1 && <0.21
+    , base                             >=4.7      && <5
+    , base32string                     >=0.9.1    && <0.10
+    , base64                           >=0.2      && <0.5
+    , cborg                            >=0.2.4    && <0.3
+    , connection                       >=0.3      && <0.4
+    , data-default-class               >=0.1      && <0.2
+    , data-interval                    >=2.0.1    && <2.2
+    , hspec                            <2.12
+    , hspec-expectations               <0.9
+    , hspec-wai                        <0.12
+    , http-client                      >=0.6.4.1  && <0.8
+    , network                          >=3.1      && <3.2
+    , network-simple-tls               >=0.4      && <0.5
+    , QuickCheck                       <2.15
+    , quickcheck-classes               >=0.6      && <0.7
+    , quickcheck-instances             <0.4
+    , serialise                        >=0.2.3    && <0.3
+    , servant                          >=0.16.2   && <0.21
+    , servant-client                   >=0.16.0.1 && <0.21
     , tahoe-great-black-swamp
-    , temporary                >=1.3      && <1.4
-    , tls                      >=1.5      && <2
-    , vector                   >=0.12.1.2 && <0.13
-    , wai-extra                >=3.0.29.2 && <3.2
-    , warp                     >=3.3.13   && <3.4
-    , warp-tls                 >=3.2.12   && <3.5
-    , x509                     >=1.7      && <1.8
-    , x509-store               >=1.6      && <1.7
-    , yaml                     >=0.11.5.0 && <0.11.9.0 || >=0.11.9.0.0 && <0.12
+    , tahoe-great-black-swamp-testing  >=0.4      && <0.5
+    , tahoe-great-black-swamp-types    >=0.4      && <0.5
+    , temporary                        >=1.3      && <1.4
+    , tls                              >=1.5      && <2
+    , vector                           >=0.12.1.2 && <0.13
+    , wai-extra                        >=3.0.29.2 && <3.2
+    , warp                             >=3.3.13   && <3.4
+    , warp-tls                         >=3.2.12   && <3.5
+    , x509                             >=1.7      && <1.8
+    , x509-store                       >=1.6      && <1.7
+    , yaml                             >=0.11.5.0 && <0.11.9.0 || >=0.11.9.0.0 && <0.12
diff --git a/test/Lib.hs b/test/Lib.hs
index 7c9af770bb1a1a8abb20caca6ccda0aff60c05e4..e0aaa0918c68750ee148bbad84b5ee8c15155c62 100644
--- a/test/Lib.hs
+++ b/test/Lib.hs
@@ -1,13 +1,10 @@
 {-# LANGUAGE OverloadedStrings #-}
-{-# OPTIONS_GHC -Wno-orphans #-}
 
 module Lib (
     gen10String,
-    genStorageIndex,
     positiveIntegers,
     b32encode,
     b32decode,
-    ShareNumbers (..),
 ) where
 
 import Data.Word (
@@ -56,10 +53,6 @@ gen10ByteString :: Gen ByteString
 gen10ByteString =
     suchThatMap (vectorOf 10 (arbitrary :: Gen Word8)) (Just . pack)
 
-genStorageIndex :: Gen StorageIndex
-genStorageIndex =
-    suchThatMap gen10ByteString (Just . b32encode)
-
 positiveIntegers :: Gen Integer
 positiveIntegers =
     suchThatMap (arbitrary :: Gen Integer) (Just . abs)
@@ -73,37 +66,3 @@ b32encode = Text.unpack . Base32.toText . Base32.fromBytes b32table
 b32decode :: String -> ByteString
 b32decode base32 =
     Base32.toBytes b32table $ Base32.fromText b32table $ Text.pack base32
-
-newtype ShareNumbers = ShareNumbers {getShareNumbers :: [ShareNumber]} deriving (Eq, Ord, Show)
-
-{- | An Arbitrary instance that guarantees ShareNumbers are unique and
-   non-empty (without invoking discard).
--}
-instance Arbitrary ShareNumbers where
-    arbitrary = ShareNumbers . fmap ShareNumber <$> nums
-      where
-        nums =
-            arbitrary
-                >>= (shuffle . enumFromTo 0) . getNonNegative
-                >>= \(num : rest) -> (num :) <$> sublistOf rest
-
-instance Arbitrary ShareNumber where
-    arbitrary = ShareNumber <$> arbNonNeg
-
-instance Arbitrary ReadTestWriteVectors where
-    arbitrary = ReadTestWriteVectors <$> arbitrary <*> arbitrary
-
-instance Arbitrary TestWriteVectors where
-    arbitrary = TestWriteVectors <$> arbitrary <*> arbitrary <*> oneof [pure Nothing, Just <$> arbNonNeg]
-
-instance Arbitrary TestVector where
-    arbitrary = TestVector <$> arbNonNeg <*> arbNonNeg <*> pure Eq <*> arbitrary
-
-instance Arbitrary WriteVector where
-    arbitrary = WriteVector <$> arbNonNeg <*> arbitrary
-
-instance Arbitrary ReadVector where
-    arbitrary = ReadVector <$> arbNonNeg <*> (getPositive <$> arbitrary)
-
-arbNonNeg :: (Arbitrary n, Integral n) => Gen n
-arbNonNeg = getNonNegative <$> arbitrary
diff --git a/test/MiscSpec.hs b/test/MiscSpec.hs
index bd3e170c82d70b83e9e8747484b3baa84d045a13..0986284607dea2d9e5e7095be8923b78c184134c 100644
--- a/test/MiscSpec.hs
+++ b/test/MiscSpec.hs
@@ -35,10 +35,9 @@ import TahoeLAFS.Storage.API (
 import Lib (
     b32decode,
     b32encode,
-    genStorageIndex,
     positiveIntegers,
  )
-
+import Tahoe.Storage.Testing.Spec (genStorageIndex)
 import TahoeLAFS.Storage.Backend.Filesystem (
     incomingPathOf,
     partitionM,
diff --git a/test/SemanticSpec.hs b/test/SemanticSpec.hs
index 9324c8f5b9cb5338b0c3d569c29ad5be78f73b44..6d85d0958349ba3e7cb3a757a62b6d85bd29f594 100644
--- a/test/SemanticSpec.hs
+++ b/test/SemanticSpec.hs
@@ -1,70 +1,37 @@
-{-# LANGUAGE FlexibleInstances #-}
-
 module SemanticSpec (
     spec,
 ) where
 
-import Prelude hiding (
-    lookup,
-    toInteger,
- )
-
 import Control.Monad (
     void,
     when,
  )
-
 import Data.Bits (
     xor,
  )
-
+import qualified Data.ByteString as B
+import Data.Data (Proxy (Proxy))
+import Data.IORef (IORef)
+import Data.Interval (Boundary (Closed, Open), Extended (Finite), Interval, interval, lowerBound, upperBound)
+import qualified Data.IntervalSet as IS
+import qualified Data.Map.Strict as Map
+import qualified Data.Set as Set
 import GHC.Word (
     Word8,
  )
-
-import qualified Data.Map.Strict as Map
-import qualified Data.Set as Set
-
+import Network.HTTP.Types (ByteRange (ByteRangeSuffix))
 import System.Directory (
     removeDirectoryRecursive,
  )
-
 import System.IO.Temp (
     createTempDirectory,
     getCanonicalTemporaryDirectory,
  )
-
-import Test.Hspec (
-    Spec,
-    context,
-    describe,
-    it,
-    shouldBe,
-    shouldThrow,
- )
-import Test.QuickCheck (
-    Gen,
-    NonEmptyList (getNonEmpty),
-    NonNegative (NonNegative),
-    Positive (..),
-    Property,
-    chooseInteger,
-    counterexample,
-    forAll,
-    ioProperty,
-    oneof,
-    property,
-    vector,
-    (==>),
- )
-
-import Test.QuickCheck.Monadic (
-    monadicIO,
-    run,
+import Tahoe.Storage.Testing.Spec (
+    ShareNumbers (..),
+    genStorageIndex,
+    makeStorageSpec,
  )
-
-import qualified Data.ByteString as B
-
 import TahoeLAFS.Storage.API (
     AllocateBuckets (AllocateBuckets),
     AllocationResult (AllocationResult),
@@ -88,7 +55,6 @@ import TahoeLAFS.Storage.API (
     toInteger,
     writev,
  )
-
 import TahoeLAFS.Storage.Backend (
     Backend (
         abortImmutableUpload,
@@ -102,155 +68,54 @@ import TahoeLAFS.Storage.Backend (
     WriteImmutableError (..),
     writeMutableShare,
  )
-
-import Data.IORef (IORef)
-
-import Lib (
-    ShareNumbers (..),
-    genStorageIndex,
+import TahoeLAFS.Storage.Backend.Filesystem (
+    FilesystemBackend (FilesystemBackend),
  )
-
 import TahoeLAFS.Storage.Backend.Memory (
     MemoryBackend (..),
-    MutableShareSize (MutableShareSize),
+    MutableShareSize (..),
+    SecretProtected (..),
+    addShares,
     memoryBackend,
     shareDataSize,
     toMutableShareSize,
  )
-
-import Data.Data (Proxy (Proxy))
-import Data.Interval (Boundary (Closed, Open), Extended (Finite), Interval, interval, lowerBound, upperBound)
-import qualified Data.IntervalSet as IS
-import TahoeLAFS.Storage.Backend.Filesystem (
-    FilesystemBackend (FilesystemBackend),
+import TahoeLAFS.Storage.Backend.Util (
+    queryRangeToReadVector,
+    readvToQueryRange,
+ )
+import Test.Hspec (
+    Spec,
+    context,
+    describe,
+    it,
+    shouldBe,
+    shouldThrow,
+ )
+import Test.QuickCheck (
+    Gen,
+    NonEmptyList (getNonEmpty),
+    NonNegative (NonNegative),
+    Positive (..),
+    Property,
+    chooseInteger,
+    counterexample,
+    forAll,
+    ioProperty,
+    oneof,
+    property,
+    vector,
+    (==>),
  )
 import Test.QuickCheck.Classes (Laws (..), semigroupMonoidLaws)
-
-permuteShare :: B.ByteString -> ShareNumber -> B.ByteString
-permuteShare seed number =
-    B.map xor' seed
-  where
-    xor' :: Word8 -> Word8
-    xor' = xor $ fromInteger $ toInteger number
-
-writeShares ::
-    (ShareNumber -> ShareData -> Maybe a -> IO ()) ->
-    [(ShareNumber, ShareData)] ->
-    IO ()
-writeShares _write [] = return ()
-writeShares write ((shareNumber, shareData) : rest) = do
-    -- TODO For now we'll do single complete writes.  Later try breaking up the data.
-    write shareNumber shareData Nothing
-    writeShares write rest
-
-alreadyHavePlusAllocatedImm ::
-    (Backend b, Mess b) =>
-    IO b -> -- The backend on which to operate
-    StorageIndex -> -- The storage index to use
-    ShareNumbers -> -- The share numbers to allocate
-    Positive Size -> -- The size of each share
-    Property
-alreadyHavePlusAllocatedImm makeBackend storageIndex (ShareNumbers shareNumbers) (Positive size) = monadicIO $
-    run $
-        withBackend makeBackend $ \backend -> do
-            result <- createImmutableStorageIndex backend storageIndex (Just [anUploadSecret]) $ AllocateBuckets shareNumbers size
-            when (alreadyHave result ++ allocated result /= shareNumbers) $
-                fail
-                    ( show (alreadyHave result)
-                        ++ " ++ "
-                        ++ show (allocated result)
-                        ++ " /= "
-                        ++ show shareNumbers
-                    )
-
--- The share numbers of immutable share data written to the shares of a given
--- storage index can be retrieved.
-immutableWriteAndEnumerateShares ::
-    (Backend b, Mess b) =>
-    IO b ->
-    StorageIndex ->
-    ShareNumbers ->
-    B.ByteString ->
-    Property
-immutableWriteAndEnumerateShares makeBackend storageIndex (ShareNumbers shareNumbers) shareSeed = monadicIO $ do
-    let permutedShares = Prelude.map (permuteShare shareSeed) shareNumbers
-        size = fromIntegral (B.length shareSeed)
-        allocate = AllocateBuckets shareNumbers size
-    run $
-        withBackend makeBackend $ \backend -> do
-            void $ createImmutableStorageIndex backend storageIndex uploadSecret allocate
-            writeShares (\sn -> writeImmutableShare backend storageIndex sn uploadSecret) (zip shareNumbers permutedShares)
-            readShareNumbers <- getImmutableShareNumbers backend storageIndex
-            when (readShareNumbers /= (CBORSet . Set.fromList $ shareNumbers)) $
-                fail (show readShareNumbers ++ " /= " ++ show shareNumbers)
-  where
-    uploadSecret = Just [anUploadSecret]
-
--- Immutable share data written to the shares of a given storage index cannot
--- be rewritten by a subsequent writeImmutableShare operation.
-immutableWriteAndRewriteShare ::
-    (Backend b, Mess b) =>
-    IO b ->
-    StorageIndex ->
-    ShareNumbers ->
-    B.ByteString ->
-    Property
-immutableWriteAndRewriteShare makeBackend storageIndex (ShareNumbers shareNumbers) shareSeed = monadicIO $ do
-    let size = fromIntegral (B.length shareSeed)
-        allocate = AllocateBuckets shareNumbers size
-        aShareNumber = head shareNumbers
-        aShare = permuteShare shareSeed aShareNumber
-    run $
-        withBackend makeBackend $ \backend -> do
-            void $ createImmutableStorageIndex backend storageIndex uploadSecret allocate
-            let write = writeImmutableShare backend storageIndex aShareNumber uploadSecret aShare Nothing
-            write
-            write `shouldThrow` (== ImmutableShareAlreadyWritten)
-  where
-    uploadSecret = Just [anUploadSecret]
-
--- Immutable share data written to the shares of a given storage index can be
--- retrieved verbatim and associated with the same share numbers as were
--- specified during writing.
-immutableWriteAndReadShare ::
-    (Backend b, Mess b) =>
-    IO b ->
-    StorageIndex ->
-    ShareNumbers ->
-    B.ByteString ->
-    Property
-immutableWriteAndReadShare makeBackend storageIndex (ShareNumbers shareNumbers) shareSeed = monadicIO $ do
-    let permutedShares = Prelude.map (permuteShare shareSeed) shareNumbers
-    let size = fromIntegral (B.length shareSeed)
-    let allocate = AllocateBuckets shareNumbers size
-    run $
-        withBackend makeBackend $ \backend -> do
-            void $ createImmutableStorageIndex backend storageIndex uploadSecret allocate
-            writeShares (\sn -> writeImmutableShare backend storageIndex sn uploadSecret) (zip shareNumbers permutedShares)
-            readShares' <- mapM (\sn -> readImmutableShare backend storageIndex sn Nothing) shareNumbers
-            when (permutedShares /= readShares') $
-                fail (show permutedShares ++ " /= " ++ show readShares')
-  where
-    uploadSecret = Just [anUploadSecret]
-
--- The share numbers of mutable share data written to the shares of a given
--- storage index can be retrieved.
-mutableWriteAndEnumerateShares ::
-    (Backend b, Mess b) =>
-    IO b ->
-    StorageIndex ->
-    ShareNumbers ->
-    B.ByteString ->
-    Property
-mutableWriteAndEnumerateShares makeBackend storageIndex (ShareNumbers shareNumbers) shareSeed = monadicIO $ do
-    let permutedShares = Prelude.map (permuteShare shareSeed) shareNumbers
-    let nullSecret = WriteEnablerSecret ""
-    run $
-        withBackend makeBackend $ \backend -> do
-            writeShares (\sn sh -> writeMutableShare backend storageIndex sn nullSecret sh) (zip shareNumbers permutedShares)
-            (CBORSet readShareNumbers) <- getMutableShareNumbers backend storageIndex
-            when (readShareNumbers /= Set.fromList shareNumbers) $
-                fail (show readShareNumbers ++ " /= " ++ show shareNumbers)
+import Test.QuickCheck.Monadic (
+    monadicIO,
+    run,
+ )
+import Prelude hiding (
+    lookup,
+    toInteger,
+ )
 
 -- | Create a Spec that checks the given Laws.
 lawsCheck :: Laws -> Spec
@@ -260,154 +125,10 @@ lawsCheck Laws{lawsTypeclass, lawsProperties} =
   where
     oneLawProp (lawName, lawProp) = it lawName lawProp
 
--- | The specification for a storage backend.
-storageSpec :: (Backend b, Mess b) => IO b -> Spec
-storageSpec makeBackend = do
-    context "v1" $ do
-        context "immutable" $ do
-            describe "allocate a storage index" $
-                it "accounts for all allocated share numbers" $
-                    property $
-                        forAll genStorageIndex (alreadyHavePlusAllocatedImm makeBackend)
-
-        context "write a share" $ do
-            it "disallows writes without an upload secret" $
-                property $
-                    withBackend makeBackend $ \backend -> do
-                        AllocationResult [] [ShareNumber 0] <- createImmutableStorageIndex backend "storageindex" (Just [anUploadSecret]) (AllocateBuckets [ShareNumber 0] 100)
-                        writeImmutableShare backend "storageindex" (ShareNumber 0) Nothing "fooooo" Nothing `shouldThrow` (== MissingUploadSecret)
-
-            it "disallows writes without a matching upload secret" $
-                property $
-                    withBackend makeBackend $ \backend -> do
-                        AllocationResult [] [ShareNumber 0] <- createImmutableStorageIndex backend "storageindex" (Just [anUploadSecret]) (AllocateBuckets [ShareNumber 0] 100)
-                        -- Supply the wrong secret as an upload secret and the
-                        -- right secret marked for some other use - this
-                        -- should still fail.
-                        writeImmutableShare backend "storageindex" (ShareNumber 0) (Just [Upload (UploadSecret "wrongsecret")]) "fooooo" Nothing `shouldThrow` (== IncorrectUploadSecret)
-
-            it "disallows aborts without an upload secret" $
-                property $
-                    withBackend makeBackend $ \backend -> do
-                        abortImmutableUpload backend "storageindex" (ShareNumber 0) Nothing `shouldThrow` (== MissingUploadSecret)
-
-            it "disallows aborts without a matching upload secret" $
-                property $
-                    withBackend makeBackend $ \backend -> do
-                        AllocationResult [] [ShareNumber 0] <- createImmutableStorageIndex backend "storageindex" (Just [anUploadSecret]) (AllocateBuckets [ShareNumber 0] 100)
-                        abortImmutableUpload backend "storageindex" (ShareNumber 0) (Just [Upload (UploadSecret "wrongsecret")]) `shouldThrow` (== IncorrectUploadSecret)
-
-            it "allows aborts with a matching upload secret" $
-                property $
-                    withBackend makeBackend $ \backend -> do
-                        AllocationResult [] [ShareNumber 0] <- createImmutableStorageIndex backend "storageindex" (Just [anUploadSecret]) (AllocateBuckets [ShareNumber 0] 100)
-                        abortImmutableUpload backend "storageindex" (ShareNumber 0) (Just [anUploadSecret])
-
-            it "returns the share numbers that were written" $
-                property $
-                    forAll genStorageIndex (immutableWriteAndEnumerateShares makeBackend)
-
-            it "returns the written data when requested" $
-                property $
-                    forAll genStorageIndex (immutableWriteAndReadShare makeBackend)
-
-            it "cannot be written more than once" $
-                property $
-                    forAll genStorageIndex (immutableWriteAndRewriteShare makeBackend)
-
-        context "mutable" $ do
-            -- XXX There's lots of problems around supplying negative integer
-            -- values in most places.  We avoid tripping over those cases here
-            -- but we should really fix the implementation to deal with them
-            -- sensible.
-            describe "write a share" $ do
-                it "returns the share numbers that were written" $
-                    property $
-                        forAll genStorageIndex (mutableWriteAndEnumerateShares makeBackend)
-
-                it "rejects an update with the wrong write enabler" $
-                    forAll genStorageIndex $ \storageIndex shareNum (secret, wrongSecret) (shareData, junkData) (NonNegative offset) ->
-                        (secret /= wrongSecret)
-                            && (shareData /= junkData)
-                            && (B.length shareData > 0)
-                            && (B.length junkData > 0)
-                            ==> monadicIO
-                                . run
-                                . withBackend makeBackend
-                            $ \backend -> do
-                                first <- readvAndTestvAndWritev backend storageIndex (WriteEnablerSecret secret) (writev shareNum offset shareData)
-                                success first `shouldBe` True
-                                readvAndTestvAndWritev backend storageIndex (WriteEnablerSecret wrongSecret) (writev shareNum offset junkData)
-                                    `shouldThrow` (== IncorrectWriteEnablerSecret)
-                                third <- readvAndTestvAndWritev backend storageIndex (WriteEnablerSecret secret) (readv offset (fromIntegral $ B.length shareData))
-                                readData third `shouldBe` Map.singleton shareNum [shareData]
-
-                it "overwrites older data with newer data" $
-                    -- XXX We go out of our way to generate a legal storage
-                    -- index here.  Illegal storage indexes aren't checked by
-                    -- the system anywhere but they really ought to be.
-                    forAll genStorageIndex $ \storageIndex (readVectors :: NonEmptyList ReadVector) secret shareNum -> do
-                        let is = readVectorToIntervalSet (getNonEmpty readVectors)
-                            sp = IS.span is
-                            (lower, upper) = toFiniteBounds sp
-                            size = upper - lower
-                        bs <- B.pack <$> vector (fromIntegral size)
-                        writeVectors <- writesThatResultIn bs lower size
-                        pure $
-                            counterexample ("write vectors: " <> show writeVectors) $
-                                ioProperty $
-                                    withBackend makeBackend $ \backend -> do
-                                        let x = foldMap (\(WriteVector off shareData) -> writev shareNum off shareData) writeVectors
-                                        writeResult <- readvAndTestvAndWritev backend storageIndex (WriteEnablerSecret secret) x
-                                        success writeResult `shouldBe` True
-
-                                        let y = foldMap (\(ReadVector off sz) -> readv off sz) (getNonEmpty readVectors)
-                                        readResult <- readvAndTestvAndWritev backend storageIndex (WriteEnablerSecret secret) y
-                                        Map.map B.concat (readData readResult)
-                                            `shouldBe` Map.singleton shareNum (B.concat $ extractRead lower bs <$> getNonEmpty readVectors)
-
-extractRead :: Integral a => a -> B.ByteString -> ReadVector -> B.ByteString
-extractRead lower bs (ReadVector offset size) = B.take (fromIntegral size) . B.drop (fromIntegral offset - fromIntegral lower) $ bs
-
-toFiniteBounds :: Show r => Interval r -> (r, r)
-toFiniteBounds i = (lower, upper)
-  where
-    lower = toFinite (lowerBound i)
-    upper = toFinite (upperBound i)
-
-    toFinite n = case n of
-        Finite r -> r
-        e -> error ("Non-finite bound " <> show e)
-
-readVectorToIntervalSet :: [ReadVector] -> IS.IntervalSet Integer
-readVectorToIntervalSet rvs = foldr IS.insert IS.empty (f <$> rvs)
-  where
-    f (ReadVector offset size) = interval (Finite offset, Closed) (Finite $ offset + size, Open)
-
-writesThatResultIn :: B.ByteString -> Offset -> Size -> Gen [WriteVector]
-writesThatResultIn "" _ _ = pure []
-writesThatResultIn bs offset size =
-    oneof
-        [ -- The whole thing as one write
-          pure [WriteVector offset bs]
-        , -- Or divide and conquer arbitrarily
-          do
-            prefixLen <- chooseInteger (0, fromIntegral $ B.length bs)
-            pfx <- writesThatResultIn (B.take (fromIntegral prefixLen) bs) offset prefixLen
-            sfx <- writesThatResultIn (B.drop (fromIntegral prefixLen) bs) (offset + prefixLen) (size - prefixLen)
-            pure $ pfx <> sfx
-        , -- Or write some other random somewhere in this range first, to
-          -- later be overwritten.
-          (:) <$> (WriteVector <$> chooseInteger (offset, offset + size) <*> (chooseInteger (1, size) >>= bytes)) <*> writesThatResultIn bs offset size
-        ]
-
-bytes :: Integer -> Gen B.ByteString
-bytes len = B.pack <$> vector (fromIntegral len)
-
 spec :: Spec
 spec = do
     context "utilities" $ do
-        describe "MutableShareStorage" $ do
+        describe "toMutableShareSize" $ do
             it "finds the larger size for some cases" $ do
                 toMutableShareSize (WriteVector 0 "x") <> toMutableShareSize (WriteVector 1 "x")
                     `shouldBe` MutableShareSize 0 2
@@ -421,6 +142,26 @@ spec = do
                 toMutableShareSize (WriteVector 0 "Hello") <> toMutableShareSize (WriteVector 3 "world")
                     `shouldBe` MutableShareSize 0 8
 
+        describe "addShares" $ do
+            it "prepends the new write to the share storage" $ do
+                let si = "storageindex"
+                    enabler = WriteEnablerSecret "enabler"
+                    addShares' = addShares si enabler
+                    shareNum = ShareNumber 0
+                    Just a = addShares' mempty (Map.fromList [(shareNum, [WriteVector 1 "first"])])
+                    Just b = addShares' a (Map.fromList [(shareNum, [WriteVector 2 "second"])])
+
+                b `shouldBe` Map.fromList [(si, SecretProtected enabler (Map.fromList [(shareNum, [WriteVector 2 "second", WriteVector 1 "first"])]))]
+
+            it "puts later elements in a single WriteVector list earlier in the MutableShareStorage list" $ do
+                let si = "storageindex"
+                    enabler = WriteEnablerSecret "enabler"
+                    addShares' = addShares si enabler
+                    shareNum = ShareNumber 0
+                    Just a = addShares' mempty (Map.fromList [(shareNum, [WriteVector 1 "first", WriteVector 2 "second"])])
+
+                a `shouldBe` Map.fromList [(si, SecretProtected enabler (Map.fromList [(shareNum, [WriteVector 2 "second", WriteVector 1 "first"])]))]
+
         describe "shareDataSize" $ do
             it "converts list of WriteVector to a size" $ do
                 shareDataSize [WriteVector 2 "foo", WriteVector 10 "quux"]
@@ -442,11 +183,16 @@ spec = do
             . semigroupMonoidLaws
             $ (Proxy :: Proxy ReadTestWriteVectors)
 
-    context "memory" $ storageSpec memoryBackend
-    context "filesystem" $ storageSpec filesystemBackend
+        describe "ReadVector" $ do
+            it "round-trips through queryRangeToReadVector / readvToQueryRange" $
+                property $ \rvs ->
+                    (queryRangeToReadVector 1234 . readvToQueryRange) rvs `shouldBe` rvs
 
-anUploadSecret :: LeaseSecret
-anUploadSecret = Upload $ UploadSecret "anuploadsecret"
+            it "imposes a lower bound of zero on offset" $ do
+                queryRangeToReadVector 1 (Just [ByteRangeSuffix 2]) `shouldBe` [ReadVector 0 1]
+
+    context "memory" $ makeStorageSpec memoryBackend cleanupMemory
+    context "filesystem" $ makeStorageSpec filesystemBackend cleanupFilesystem
 
 filesystemBackend :: IO FilesystemBackend
 filesystemBackend = do
@@ -457,18 +203,5 @@ createTemporaryDirectory = do
     parent <- getCanonicalTemporaryDirectory
     createTempDirectory parent "gbs-semanticspec"
 
-class Mess a where
-    -- Cleanup resources belonging to m
-    cleanup :: a -> IO ()
-
-instance Mess FilesystemBackend where
-    cleanup (FilesystemBackend path) = removeDirectoryRecursive path
-
-instance Mess (IORef MemoryBackend) where
-    cleanup _ = pure ()
-
-withBackend :: (Mess b, Backend b) => IO b -> ((b -> IO ()) -> IO ())
-withBackend b action = do
-    backend <- b
-    action backend
-    cleanup backend
+cleanupFilesystem (FilesystemBackend path) = removeDirectoryRecursive path
+cleanupMemory _ = pure ()