One storage API for local disks, object stores, and remote filesystems.
Applications often need to store files in different places:
- Local disks during development
- Object storage like S3 or GCS in production
- Remote filesystems like SFTP or FTP
- Cloud providers or custom remotes
Each backend has its own API and client library.
storage provides a small, consistent interface so your application code doesn't have to change when the backend changes.
Each driver is thoroughly tested against the shared test suite using testcontainers or emulators where appropriate.
| Driver | Kind | Notes |
|---|---|---|
| localstorage | Local filesystem | Good default for local development and tests. |
| memorystorage | In-memory | Best zero-dependency backend for tests and ephemeral workflows. |
| redisstorage | Distributed memory | Good for temporary distributed blob storage with explicit size and durability tradeoffs. |
| ftpstorage | Remote filesystem | Embedded integration fixture in the shared matrix. |
| sftpstorage | Remote filesystem | Container-backed integration coverage in the shared matrix. |
| s3storage | Object storage | MinIO-backed integration coverage in the shared matrix. |
| gcsstorage | Object storage | Emulator-backed integration coverage via fake-gcs-server. |
| dropboxstorage | Object storage | Returns temporary links; external integration strategy still open. |
| rclonestorage | Breadth driver | Depends on the underlying rclone remote; see the rclone storage systems overview. |
Root module:
go get github.com/goforj/storage

Then add the driver modules you need, for example:
go get github.com/goforj/storage/driver/localstorage
go get github.com/goforj/storage/driver/memorystorage
go get github.com/goforj/storage/driver/redisstorage
go get github.com/goforj/storage/driver/ftpstorage
go get github.com/goforj/storage/driver/sftpstorage
go get github.com/goforj/storage/driver/s3storage
go get github.com/goforj/storage/driver/gcsstorage
go get github.com/goforj/storage/driver/dropboxstorage
go get github.com/goforj/storage/driver/rclonestorage

Choose the construction style that fits your application:
- Use a driver constructor like `localstorage.New(...)` when you want a single backend directly.
- Use `storage.Build(...)` when you want one backend through the shared `storage` API.
- Use `storage.New(...)` when you want multiple named disks managed from config.
All storage operations also expose *Context equivalents for deadlines and cancellation. The default methods use context.Background().
package main
import (
"errors"
"fmt"
"log"
"github.com/goforj/storage"
"github.com/goforj/storage/driver/localstorage"
)
func main() {
disk, err := storage.Build(localstorage.Config{
Root: "/tmp/storage",
})
if err != nil {
log.Fatal(err)
}
// Put a file.
if err := disk.Put("docs/readme.txt", []byte("hello")); err != nil {
log.Fatal(err)
}
// Check whether the file exists.
ok, err := disk.Exists("docs/readme.txt")
if err != nil {
log.Fatal(err)
}
fmt.Println(ok)
// Output: true
// Read the file back.
data, err := disk.Get("docs/readme.txt")
if err != nil {
log.Fatal(err)
}
fmt.Println(string(data))
// Output: hello
// List the parent directory.
entries, err := disk.List("docs")
if err != nil {
log.Fatal(err)
}
fmt.Println(entries[0].Path)
// Output: docs/readme.txt
// Delete the file.
if err := disk.Delete("docs/readme.txt"); err != nil {
log.Fatal(err)
}
// Ask the backend for an access URL when supported.
url, err := disk.URL("docs/readme.txt")
switch {
case err == nil:
fmt.Println(url)
case errors.Is(err, storage.ErrUnsupported):
fmt.Println("url generation unsupported")
// Output: url generation unsupported
default:
log.Fatal(err)
}
}

package main
import (
"log"
"github.com/goforj/storage"
"github.com/goforj/storage/driver/localstorage"
)
func main() {
// Build one disk through the shared storage API.
built, err := storage.Build(localstorage.Config{
Root: "/tmp/storage",
Prefix: "scratch",
})
if err != nil {
log.Fatal(err)
}
// Or construct the driver directly.
direct, err := localstorage.New(localstorage.Config{
Root: "/tmp/storage",
Prefix: "scratch",
})
if err != nil {
log.Fatal(err)
}
_, _ = built, direct
}

package main
import (
"log"
"github.com/goforj/storage"
"github.com/goforj/storage/driver/localstorage"
"github.com/goforj/storage/driver/s3storage"
)
func main() {
// Build a manager with multiple named disks.
mgr, err := storage.New(storage.Config{
Default: "assets",
Disks: map[storage.DiskName]storage.DriverConfig{
"assets": localstorage.Config{
Root: "/tmp/storage",
Prefix: "assets",
},
"uploads": s3storage.Config{
Bucket: "app-uploads",
Region: "us-east-1",
Endpoint: "http://localhost:9000",
AccessKeyID: "minioadmin",
SecretAccessKey: "minioadmin",
UsePathStyle: true,
Prefix: "uploads",
},
},
})
if err != nil {
log.Fatal(err)
}
// Resolve a disk by name.
disk, err := mgr.Disk("assets")
if err != nil {
log.Fatal(err)
}
// Put a file into the disk.
if err := disk.Put("hello.txt", []byte("hello")); err != nil {
log.Fatal(err)
}
// Read the file back.
data, err := disk.Get("hello.txt")
if err != nil {
log.Fatal(err)
}
_ = data // []byte("hello")
}

Use rclonestorage when you want to access rclone-backed remotes through the storage interface.
package main
import (
"log"
"github.com/goforj/storage/driver/rclonestorage"
)
const rcloneConfig = `
[localdisk]
type = local
`
func main() {
// Build an rclone-backed disk from inline rclone config.
disk, err := rclonestorage.New(rclonestorage.Config{
Remote: "localdisk:/tmp/storage",
Prefix: "sandbox",
RcloneConfigData: rcloneConfig,
})
if err != nil {
log.Fatal(err)
}
// Put a file through rclone.
if err := disk.Put("rclone.txt", []byte("hello")); err != nil {
log.Fatal(err)
}
// List files from the disk root.
entries, err := disk.List("")
if err != nil {
log.Fatal(err)
}
_ = entries // rclone.txt
}

See examples for runnable examples.
package main
import (
"testing"
"github.com/goforj/storage"
"github.com/goforj/storage/driver/memorystorage"
"github.com/goforj/storage/storagetest"
)
func TestUpload(t *testing.T) {
// Create one fake disk.
disk := storagetest.Fake(t)
_ = disk.Put("photo.jpg", []byte("ok"))
// Or create a fake manager with named in-memory disks.
mgr := storagetest.FakeManager(t, "photos", map[storage.DiskName]memorystorage.Config{
"photos": {Prefix: "photos"},
"avatars": {Prefix: "avatars"},
})
photos, _ := mgr.Disk("photos")
_ = photos.Put("one.jpg", []byte("ok"))
}

Benchmarks are rendered from docs/bench and compare the shared storage contract across representative backends.
Run the renderer with:
cd docs/bench
go test -tags benchrender . -run TestRenderBenchmarks -count=1 -v

Each chart sample uses a fixed measurement window per driver, so the ops chart remains meaningful without unbounded benchmark calibration.
Notes:
- `gcs` uses fake-gcs-server.
- `ftp` is included by default and now reuses a logged-in control connection per storage instance during the benchmark run.
- `redis`, `s3`, and `sftp` use testcontainers; include them with `BENCH_WITH_DOCKER=1` or by explicitly setting `BENCH_DRIVER`.
- `rclone_local` measures rclone overhead on top of a local filesystem remote.
| Driver | Stat | Copy | Move | Walk | URL | Context |
|---|---|---|---|---|---|---|
| localstorage | ✓ | ✓ | ✓ | ✓ | ✗ | ✓ |
| memorystorage | ✓ | ✓ | ✓ | ✓ | ✗ | ✓ |
| redisstorage | ✓ | ✓ | ✓ | ✓ | ✗ | ✓ |
| ftpstorage | ✓ | ✓ | ✓ | ✓ | ✗ | ✓ |
| sftpstorage | ✓ | ✓ | ✓ | ✓ | ✗ | ✓ |
| s3storage | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| gcsstorage | ✓ | ✓ | ✓ | ✓ | ~ | ✓ |
| dropboxstorage | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| rclonestorage | ✓ | ✓ | ✓ | ✓ | ~ | ✓ |
~ indicates backend- or environment-dependent behavior. For example, GCS URL generation is unavailable in emulator mode and rclone URL support depends on the underlying remote.
The API section below is autogenerated; do not edit between the markers.
LocalRemote defines a local backend configuration.
Example: define a local remote
remote := rclonestorage.LocalRemote{Name: "local"}
fmt.Println(remote.Name)
// Output: local

Example: define a local remote with all fields
remote := rclonestorage.LocalRemote{
Name: "local",
}
fmt.Println(remote.Name)
// Output: local

MustRenderLocal panics on error.
cfg := rclonestorage.MustRenderLocal(rclonestorage.LocalRemote{Name: "local"})
fmt.Println(cfg)
// Output:
// [local]
// type = local

MustRenderS3 panics on error.
cfg := rclonestorage.MustRenderS3(rclonestorage.S3Remote{
Name: "assets",
Region: "us-east-1",
AccessKeyID: "key",
SecretAccessKey: "secret",
})
fmt.Println(cfg)
// Output:
// [assets]
// type = s3
// provider = AWS
// access_key_id = key
// secret_access_key = secret
// region = us-east-1

RenderLocal returns ini-formatted rclone config for a local backend.
cfg, _ := rclonestorage.RenderLocal(rclonestorage.LocalRemote{Name: "local"})
fmt.Println(cfg)
// Output:
// [local]
// type = local

RenderS3 returns ini-formatted rclone config content for a single S3 remote.
cfg, _ := rclonestorage.RenderS3(rclonestorage.S3Remote{
Name: "assets",
Region: "us-east-1",
AccessKeyID: "key",
SecretAccessKey: "secret",
})
fmt.Println(cfg)
// Output:
// [assets]
// type = s3
// provider = AWS
// access_key_id = key
// secret_access_key = secret
// region = us-east-1

S3Remote defines parameters for constructing an rclone S3 remote.
Example: define an s3 remote
remote := rclonestorage.S3Remote{
Name: "assets",
Region: "us-east-1",
AccessKeyID: "key",
SecretAccessKey: "secret",
}
fmt.Println(remote.Name)
// Output: assets

Example: define an s3 remote with all fields
remote := rclonestorage.S3Remote{
Name: "assets",
Endpoint: "http://localhost:9000", // default: ""
Region: "us-east-1",
AccessKeyID: "key",
SecretAccessKey: "secret",
Provider: "AWS", // default: "AWS"
PathStyle: false, // default: false
BucketACL: "private", // default: ""
UseUnsignedPayload: false, // default: false
}
fmt.Println(remote.Name)
// Output: assets

Build constructs a single storage backend from a typed driver config without a Manager.
fs, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-example",
Prefix: "assets",
})

DriverConfig is implemented by typed driver configs such as local.Config or s3storage.Config. It is the public config boundary for Manager and Build.
var cfg storage.DriverConfig = localstorage.Config{
Root: "/tmp/storage-config",
}

DriverFactory constructs a Storage for a given normalized disk configuration.
factory := storage.DriverFactory(func(ctx context.Context, cfg storage.ResolvedConfig) (storage.Storage, error) {
return nil, nil
})

ResolvedConfig is the normalized internal config passed to registered drivers. Users should prefer typed driver configs and treat this as registry adapter glue, not the primary construction API.
factory := storage.DriverFactory(func(ctx context.Context, cfg storage.ResolvedConfig) (storage.Storage, error) {
fmt.Println(cfg.Driver)
// Output: memory
return nil, nil
})
_, _ = factory(context.Background(), storage.ResolvedConfig{Driver: "memory"})BuildContext constructs a single storage backend from a typed driver config using the caller-provided context.
ContextStorage exposes context-aware storage operations for cancellation and deadlines. Use Storage for the common path and type-assert to ContextStorage when you need caller-provided context.
CopyContext copies the object at src to dst using the caller-provided context.
DeleteContext removes the object at path using the caller-provided context.
ExistsContext reports whether an object exists at path using the caller-provided context.
GetContext reads the object at path using the caller-provided context.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-get-context",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
cs := disk.(storage.ContextStorage)
data, _ := cs.GetContext(ctx, "docs/readme.txt")
fmt.Println(string(data))
// Output: hello

ListContext returns the immediate children under path using the caller-provided context.
MoveContext moves the object at src to dst using the caller-provided context.
PutContext writes an object at path using the caller-provided context.
StatContext returns the entry at path using the caller-provided context.
URLContext returns a usable access URL using the caller-provided context.
WalkContext visits entries recursively using the caller-provided context.
DiskName is a typed identifier for configured disks.
const uploads storage.DiskName = "uploads"
fmt.Println(uploads)
// Output: uploads

Entry represents an item returned by List.
Path is relative to the storage namespace, not an OS-native path. Directory-like entries are listing artifacts, not a promise of POSIX-style storage semantics.
entry := storage.Entry{
Path: "docs/readme.txt",
Size: 5,
IsDir: false,
}
fmt.Println(entry.Path, entry.IsDir)
// Output: docs/readme.txt false

Storage is the public interface for interacting with a storage backend.
Semantics:
- Put overwrites an existing object at the same path.
- List is one-level and non-recursive.
- List with an empty path lists from the disk root or prefix root.
- Walk is recursive.
- URL returns a usable access URL when the driver supports it.
- Copy overwrites the destination object when the backend supports copy semantics.
- Move relocates an object and may be implemented as copy followed by delete.
- Unsupported operations should return ErrUnsupported.
var disk storage.Storage
disk, _ = storage.Build(localstorage.Config{
Root: "/tmp/storage-interface",
})

Copy copies the object at src to dst.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-copy",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
_ = disk.Copy("docs/readme.txt", "docs/copy.txt")
data, _ := disk.Get("docs/copy.txt")
fmt.Println(string(data))
// Output: hello

Delete removes the object at path.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-delete",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
_ = disk.Delete("docs/readme.txt")
ok, _ := disk.Exists("docs/readme.txt")
fmt.Println(ok)
// Output: false

Exists reports whether an object exists at path.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-exists",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
ok, _ := disk.Exists("docs/readme.txt")
fmt.Println(ok)
// Output: true

Get reads the object at path.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-get",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
data, _ := disk.Get("docs/readme.txt")
fmt.Println(string(data))
// Output: hello

List returns the immediate children under path.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-list",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
entries, _ := disk.List("docs")
fmt.Println(entries[0].Path)
// Output: docs/readme.txt

Move moves the object at src to dst.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-move",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
_ = disk.Move("docs/readme.txt", "docs/archive.txt")
ok, _ := disk.Exists("docs/readme.txt")
fmt.Println(ok)
// Output: false

Put writes an object at path, overwriting any existing object.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-put",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
fmt.Println("stored")
// Output: stored

Stat returns the entry at path.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-stat",
})
_ = disk.Put("docs/readme.txt", []byte("hello"))
entry, _ := disk.Stat("docs/readme.txt")
fmt.Println(entry.Path, entry.Size)
// Output: docs/readme.txt 5

URL returns a usable access URL when the driver supports it.
Example: request an object url
disk, _ := storage.Build(s3storage.Config{
Bucket: "uploads",
Region: "us-east-1",
})
url, _ := disk.URL("docs/readme.txt")

Example: handle unsupported url generation
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-url",
})
_, err := disk.URL("docs/readme.txt")
fmt.Println(errors.Is(err, storage.ErrUnsupported))
// Output: true

Walk visits entries recursively when the backend supports it.
disk, _ := storage.Build(localstorage.Config{
Root: "/tmp/storage-walk",
})
err := disk.Walk("", func(entry storage.Entry) error {
fmt.Println(entry.Path)
return nil
})
fmt.Println(errors.Is(err, storage.ErrUnsupported))
// Output: true

Config defines a Dropbox-backed storage disk.
Example: define dropbox storage config
cfg := dropboxstorage.Config{
Token: "token",
}

Example: define dropbox storage config with all fields
cfg := dropboxstorage.Config{
Token: "token",
Prefix: "uploads", // default: ""
}

Config defines an FTP-backed storage disk.
Example: define ftp storage config
cfg := ftpstorage.Config{
Host: "127.0.0.1",
User: "demo",
Password: "secret",
}

Example: define ftp storage config with all fields
cfg := ftpstorage.Config{
Host: "127.0.0.1",
Port: 21, // default: 21
User: "demo", // default: ""
Password: "secret", // default: ""
TLS: false, // default: false
InsecureSkipVerify: false, // default: false
Prefix: "uploads", // default: ""
}

Config defines a GCS-backed storage disk.
Example: define gcs storage config
cfg := gcsstorage.Config{
Bucket: "uploads",
}

Example: define gcs storage config with all fields
cfg := gcsstorage.Config{
Bucket: "uploads",
CredentialsJSON: "{...}", // default: ""
Endpoint: "http://127.0.0.1:0", // default: ""
Prefix: "assets", // default: ""
}

Config defines local storage rooted at a filesystem path.
Example: define local storage config
cfg := localstorage.Config{
Root: "/tmp/storage-local",
Prefix: "sandbox",
}

Example: define local storage config with all fields
cfg := localstorage.Config{
Root: "/tmp/storage-local",
Prefix: "sandbox", // default: ""
}

Config defines an in-memory storage disk.
Example: define memory storage config
cfg := memorystorage.Config{}

Example: define memory storage config with all fields
cfg := memorystorage.Config{
Prefix: "sandbox", // default: ""
}

Config defines an rclone-backed storage disk.
Example: define rclone storage config
cfg := rclonestorage.Config{
Remote: "local:",
Prefix: "sandbox",
}

Example: define rclone storage config with all fields
cfg := rclonestorage.Config{
Remote: "local:",
Prefix: "sandbox", // default: ""
RcloneConfigPath: "/path/to/rclone.conf", // default: ""
RcloneConfigData: "[local]\ntype = local\n", // default: ""
}

Config defines an S3-backed storage disk.
Example: define s3 storage config
cfg := s3storage.Config{
Bucket: "uploads",
Region: "us-east-1",
}

Example: define s3 storage config with all fields
cfg := s3storage.Config{
Bucket: "uploads",
Endpoint: "http://localhost:9000", // default: ""
Region: "us-east-1",
AccessKeyID: "minioadmin", // default: ""
SecretAccessKey: "minioadmin", // default: ""
UsePathStyle: true, // default: false
UnsignedPayload: false, // default: false
Prefix: "assets", // default: ""
}

Config defines an SFTP-backed storage disk.
Example: define sftp storage config
cfg := sftpstorage.Config{
Host: "127.0.0.1",
User: "demo",
Password: "secret",
}

Example: define sftp storage config with all fields
cfg := sftpstorage.Config{
Host: "127.0.0.1",
Port: 22, // default: 22
User: "demo", // default: "root"
Password: "secret", // default: ""
KeyPath: "/path/id_ed25519", // default: ""
KnownHostsPath: "/path/known_hosts", // default: ""
InsecureIgnoreHostKey: false, // default: false
Prefix: "uploads", // default: ""
}

New constructs Dropbox-backed storage using the official SDK.
fs, _ := dropboxstorage.New(dropboxstorage.Config{
Token: "token",
})

New constructs FTP-backed storage using jlaffaye/ftp.
fs, _ := ftpstorage.New(ftpstorage.Config{
Host: "127.0.0.1",
User: "demo",
Password: "secret",
})

New constructs GCS-backed storage using cloud.google.com/go/storage.
fs, _ := gcsstorage.New(gcsstorage.Config{
Bucket: "uploads",
})

New constructs local storage rooted at cfg.Root with an optional prefix.
fs, _ := localstorage.New(localstorage.Config{
Root: "/tmp/storage-local",
Prefix: "sandbox",
})

New constructs in-memory storage.
fs, _ := memorystorage.New(memorystorage.Config{
Prefix: "sandbox",
})

New constructs an rclone-backed storage. All disks share a single config path.
Example: rclone storage
fs, _ := rclonestorage.New(rclonestorage.Config{
Remote: "local:",
Prefix: "sandbox",
})

Example: rclone storage with inline config
fs, _ := rclonestorage.New(rclonestorage.Config{
Remote: "localdisk:/tmp/storage",
RcloneConfigData: `
[localdisk]
type = local
`,
})

New constructs S3-backed storage using AWS SDK v2.
fs, _ := s3storage.New(s3storage.Config{
Bucket: "uploads",
Region: "us-east-1",
})

New constructs SFTP-backed storage using ssh and pkg/sftp.
fs, _ := sftpstorage.New(sftpstorage.Config{
Host: "127.0.0.1",
User: "demo",
Password: "secret",
})

Config defines named disks using typed driver configs.
cfg := storage.Config{
Default: "local",
Disks: map[storage.DiskName]storage.DriverConfig{
"local": localstorage.Config{Root: "/tmp/storage-manager"},
},
}

Manager holds named storage disks.
mgr, _ := storage.New(storage.Config{
Default: "local",
Disks: map[storage.DiskName]storage.DriverConfig{
"local": localstorage.Config{Root: "/tmp/storage-manager"},
},
})

Default returns the default disk or panics if misconfigured.
mgr, _ := storage.New(storage.Config{
Default: "local",
Disks: map[storage.DiskName]storage.DriverConfig{
"local": localstorage.Config{Root: "/tmp/storage-default"},
},
})
fs := mgr.Default()
fmt.Println(fs != nil)
// Output: true

Disk returns a named disk or an error if it does not exist.
mgr, _ := storage.New(storage.Config{
Default: "local",
Disks: map[storage.DiskName]storage.DriverConfig{
"local": localstorage.Config{Root: "/tmp/storage-default"},
"uploads": localstorage.Config{Root: "/tmp/storage-uploads"},
},
})
fs, _ := mgr.Disk("uploads")
fmt.Println(fs != nil)
// Output: true

New constructs a Manager and eagerly initializes all disks.
mgr, _ := storage.New(storage.Config{
Default: "local",
Disks: map[storage.DiskName]storage.DriverConfig{
"local": localstorage.Config{Root: "/tmp/storage-local"},
"assets": localstorage.Config{Root: "/tmp/storage-assets", Prefix: "public"},
},
})

RegisterDriver makes a driver available to the Manager. It panics on duplicate registrations.
storage.RegisterDriver("memory", func(ctx context.Context, cfg storage.ResolvedConfig) (storage.Storage, error) {
return nil, nil
})

JoinPrefix combines a disk prefix with a path using slash separators.
fmt.Println(storage.JoinPrefix("assets", "logo.svg"))
// Output: assets/logo.svg

NormalizePath cleans a user path, normalizes separators, and rejects attempts to escape the disk root or prefix root.
The empty string and root-like inputs normalize to the logical root.
p, _ := storage.NormalizePath(" /avatars//user-1.png ")
fmt.Println(p)
// Output: avatars/user-1.png

Shared contract tests live in storagetest.
Centralized integration coverage lives in integration and runs the same contract across supported backends.
That centralized matrix is the authoritative integration path for the repository.
Current fixture types in the centralized matrix:
- testcontainers: `s3`, `sftp`
- emulator: `gcs`
- embedded/local fixtures: `local`, `ftp`, `rclone_local`
Common contributor commands:
go test ./...

cd integration
go test -tags=integration ./all -count=1

Run a single integration backend:
cd integration
INTEGRATION_DRIVER=gcs go test -tags=integration ./all -count=1

Make targets:
make test
make examples-test
make coverage
make integration
make integration-driver gcs