restic/src/restic/backend/s3/s3.go

package s3

import (
	"bytes"
	"io"
	"restic"
	"strings"

	"restic/errors"

	"github.com/minio/minio-go"

	"restic/debug"
)
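
// connLimit is the maximum number of concurrent connections to the S3 endpoint.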
const connLimit = 10

// s3 is a backend which stores the data on an S3 endpoint.
type s3 struct {
	client     *minio.Client
	connChan   chan struct{}
	bucketname string
	prefix     string
}

// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
	debug.Log("s3.Open", "open, config %#v", cfg)
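
	// create the client; the connection uses TLS unless plain HTTP was requested in the config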
	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
	if err != nil {
		return nil, errors.Wrap(err, "minio.New")
	}

	be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
	be.createConnections()

	ok, err := client.BucketExists(cfg.Bucket)
	if err != nil {
		debug.Log("s3.Open", "BucketExists(%v) returned err %v", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !ok {
		// create new bucket with default ACL in default region
		err = client.MakeBucket(cfg.Bucket, "")
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}
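
// s3path returns the path within the bucket for a file of the given type and
// name; the config file is stored directly at the type path, without a name.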
func (be *s3) s3path(t restic.FileType, name string) string {
	var path string

	if be.prefix != "" {
		path = be.prefix + "/"
	}
	path += string(t)

	if t == restic.ConfigFile {
		return path
	}
	return path + "/" + name
}
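
// createConnections fills connChan with connLimit tokens. The channel is used
// as a semaphore: requests take a token before talking to S3 and return it
// when they are done, which limits the number of concurrent connections.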
func (be *s3) createConnections() {
	be.connChan = make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		be.connChan <- struct{}{}
	}
}

// Location returns this backend's location (the bucket name).
func (be *s3) Location() string {
	return be.bucketname
}

// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
	var obj *minio.Object

	debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
	path := be.s3path(h.Type, h.Name)
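
	// take a token from the connection semaphore; the deferred func returns it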
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("s3.Load", " err %v", err)
		return 0, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	info, err := obj.Stat()
	if err != nil {
		return 0, errors.Wrap(err, "obj.Stat")
	}

	// handle negative offsets
	if off < 0 {
		// if the negative offset is larger than the object itself, read from
		// the beginning.
		if -off > info.Size {
			off = 0
		} else {
			// otherwise compute the offset from the end of the file.
			off = info.Size + off
		}
	}

	// return an error if the offset is beyond the end of the file
	if off > info.Size {
		return 0, errors.Wrap(io.EOF, "")
	}

	var nextError error

	// manually create an io.ErrUnexpectedEOF
	if off+int64(len(p)) > info.Size {
		newlen := info.Size - off
		p = p[:newlen]

		nextError = io.ErrUnexpectedEOF

		debug.Log("s3.Load", " capped buffer to %v byte", len(p))
	}
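
	// a read that ends exactly at the end of the object may report io.EOF even
	// though all requested bytes were read; treat that case as success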
	n, err = obj.ReadAt(p, off)
	if int64(n) == info.Size-off && errors.Cause(err) == io.EOF {
		err = nil
	}

	if err == nil {
		err = nextError
	}

	return n, err
}

// Save stores data in the backend at the handle.
func (be s3) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("s3.Save", "%v with %d bytes", h, len(p))

	path := be.s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("s3.Save", "%v already exists", h)
		return errors.New("key already exists")
	}
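
	// limit the number of concurrent uploads with the connection semaphore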
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)

	return errors.Wrap(err, "client.PutObject")
}

// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("s3.Stat", "%v", h)

	path := be.s3path(h.Type, h.Name)
	var obj *minio.Object
	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("s3.Stat", "GetObject() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("s3.Stat", "Stat() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return restic.FileInfo{Size: fi.Size}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t restic.FileType, name string) (bool, error) {
	found := false
	path := be.s3path(t, name)
	_, err := be.client.StatObject(be.bucketname, path)
	if err == nil {
		found = true
	}

	// any error from StatObject is treated as "file does not exist"
	return found, nil
}

// Remove removes the blob with the given name and type.
func (be *s3) Remove(t restic.FileType, name string) error {
	path := be.s3path(t, name)
	err := be.client.RemoveObject(be.bucketname, path)
	debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
	return errors.Wrap(err, "client.RemoveObject")
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
	debug.Log("s3.List", "listing %v", t)
	ch := make(chan string)

	prefix := be.s3path(t, "")

	listresp := be.client.ListObjects(be.bucketname, prefix, true, done)

	go func() {
		defer close(ch)
		for obj := range listresp {
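			// strip the "<prefix>/<type>/" part so only the file name is sent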
			m := strings.TrimPrefix(obj.Key, prefix)
			if m == "" {
				continue
			}

			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}

// removeKeys deletes all blobs of the given type t.
func (be *s3) removeKeys(t restic.FileType) error {
	done := make(chan struct{})
	defer close(done)
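
	// the deferred close of done stops the listing goroutine when we return early on error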
	for key := range be.List(t, done) {
		err := be.Remove(t, key)
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error {
	alltypes := []restic.FileType{
		restic.DataFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := be.removeKeys(t)
		if err != nil {
			return err
		}
	}
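
	// the config file is not covered by alltypes, so remove it explicitly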
	return be.Remove(restic.ConfigFile, "")
}

// Close does nothing
func (be *s3) Close() error { return nil }