Alexander Neumann 2016-08-31 22:39:36 +02:00
parent 51d8e6aa28
commit cc6a8b6e15
50 changed files with 741 additions and 668 deletions

View File

@@ -1,10 +1,10 @@
-package restic
+package archiver

 import (
 	"encoding/json"
 	"io"
+	"restic"
 	"restic/debug"
-	"restic/pack"
 	"time"

 	"github.com/pkg/errors"
@@ -12,37 +12,37 @@ import (
 )

 // saveTreeJSON stores a tree in the repository.
-func saveTreeJSON(repo Repository, item interface{}) (ID, error) {
+func saveTreeJSON(repo restic.Repository, item interface{}) (restic.ID, error) {
 	data, err := json.Marshal(item)
 	if err != nil {
-		return ID{}, errors.Wrap(err, "")
+		return restic.ID{}, errors.Wrap(err, "")
 	}
 	data = append(data, '\n')

 	// check if tree has been saved before
-	id := Hash(data)
-	if repo.Index().Has(id, pack.Tree) {
+	id := restic.Hash(data)
+	if repo.Index().Has(id, restic.TreeBlob) {
 		return id, nil
 	}

-	return repo.SaveJSON(pack.Tree, item)
+	return repo.SaveJSON(restic.TreeBlob, item)
 }

 // ArchiveReader reads from the reader and archives the data. Returned is the
 // resulting snapshot and its ID.
-func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) {
+func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) {
 	debug.Log("ArchiveReader", "start archiving %s", name)
-	sn, err := NewSnapshot([]string{name})
+	sn, err := restic.NewSnapshot([]string{name})
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	p.Start()
 	defer p.Done()

-	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial())
+	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)

-	var ids IDs
+	var ids restic.IDs
 	var fileSize uint64

 	for {
@@ -52,15 +52,15 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
 		}

 		if err != nil {
-			return nil, ID{}, errors.Wrap(err, "chunker.Next()")
+			return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
 		}

-		id := Hash(chunk.Data)
+		id := restic.Hash(chunk.Data)

-		if !repo.Index().Has(id, pack.Data) {
-			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
+		if !repo.Index().Has(id, restic.DataBlob) {
+			_, err := repo.SaveAndEncrypt(restic.DataBlob, chunk.Data, nil)
 			if err != nil {
-				return nil, ID{}, err
+				return nil, restic.ID{}, err
 			}
 			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
 		} else {
@@ -71,13 +71,13 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
 		ids = append(ids, id)

-		p.Report(Stat{Bytes: uint64(chunk.Length)})
+		p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
 		fileSize += uint64(chunk.Length)
 	}

-	tree := &Tree{
-		Nodes: []*Node{
-			&Node{
+	tree := &restic.Tree{
+		Nodes: []*restic.Node{
+			&restic.Node{
 				Name:       name,
 				AccessTime: time.Now(),
 				ModTime:    time.Now(),
@@ -94,27 +94,26 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
 	treeID, err := saveTreeJSON(repo, tree)
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	sn.Tree = &treeID
 	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

-	id, err := repo.SaveJSONUnpacked(SnapshotFile, sn)
+	id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

-	sn.id = &id
 	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())

 	err = repo.Flush()
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	err = repo.SaveIndex()
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	return sn, id, nil
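For orientation, here is a minimal sketch of calling the relocated function from another package. The restic/archiver import path follows the GOPATH-style paths used elsewhere in this diff, the nil progress argument assumes restic.Progress methods stay nil-safe, and the test body itself is illustrative, not part of the commit:

package archiver_test

import (
	"bytes"
	"testing"

	"restic/archiver"
	"restic/repository"
)

func TestArchiveReaderSketch(t *testing.T) {
	repo, cleanup := repository.TestRepository(t) // test helper used below in this diff
	defer cleanup()

	// archive a small in-memory "file" under the name "example"
	rd := bytes.NewReader([]byte("example data"))
	sn, id, err := archiver.ArchiveReader(repo, nil, rd, "example")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("snapshot %v, tree %v", id.Str(), sn.Tree.Str())
}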

View File

@@ -1,19 +1,18 @@
-package restic
+package archiver

 import (
 	"bytes"
 	"io"
 	"math/rand"
-	"restic/backend"
-	"restic/pack"
+	"restic"
 	"restic/repository"
 	"testing"

 	"github.com/restic/chunker"
 )

-func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
-	buf, err := repo.LoadBlob(id, pack.Data, buf)
+func loadBlob(t *testing.T, repo *repository.Repository, id restic.ID, buf []byte) []byte {
+	buf, err := repo.LoadBlob(id, restic.DataBlob, buf)
 	if err != nil {
 		t.Fatalf("LoadBlob(%v) returned error %v", id, err)
 	}
@@ -21,8 +20,8 @@ func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []by
 	return buf
 }

-func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) {
-	tree, err := LoadTree(repo, treeID)
+func checkSavedFile(t *testing.T, repo *repository.Repository, treeID restic.ID, name string, rd io.Reader) {
+	tree, err := restic.LoadTree(repo, treeID)
 	if err != nil {
 		t.Fatalf("LoadTree() returned error %v", err)
 	}
@@ -58,6 +57,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
 	}
 }

+// fakeFile returns a reader which yields deterministic pseudo-random data.
+func fakeFile(t testing.TB, seed, size int64) io.Reader {
+	return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
+}
+
 func TestArchiveReader(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()

View File

@@ -1,4 +1,4 @@
-package restic
+package archiver

 import (
 	"encoding/json"
@@ -6,6 +6,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"restic"
 	"sort"
 	"sync"
 	"time"
@@ -14,7 +15,6 @@ import (
 	"restic/debug"
 	"restic/fs"
-	"restic/pack"
 	"restic/pipe"

 	"github.com/restic/chunker"
@@ -30,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }

 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	repo       Repository
+	repo       restic.Repository
 	knownBlobs struct {
-		IDSet
+		restic.IDSet
 		sync.Mutex
 	}
@@ -43,16 +43,16 @@ type Archiver struct {
 	Excludes []string
 }

-// NewArchiver returns a new archiver.
-func NewArchiver(repo Repository) *Archiver {
+// New returns a new archiver.
+func New(repo restic.Repository) *Archiver {
 	arch := &Archiver{
 		repo:      repo,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
 		knownBlobs: struct {
-			IDSet
+			restic.IDSet
 			sync.Mutex
 		}{
-			IDSet: NewIDSet(),
+			IDSet: restic.NewIDSet(),
 		},
 	}
@@ -70,7 +70,7 @@ func NewArchiver(repo Repository) *Archiver {
 // When the blob is not known, false is returned and the blob is added to the
 // list. This means that the caller false is returned to is responsible to save
 // the blob to the backend.
-func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
+func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
 	arch.knownBlobs.Lock()
 	defer arch.knownBlobs.Unlock()
@@ -89,10 +89,10 @@ func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
 }

 // Save stores a blob read from rd in the repository.
-func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
+func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
 	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())

-	if arch.isKnownBlob(id, pack.Data) {
+	if arch.isKnownBlob(id, restic.DataBlob) {
 		debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
 		return nil
 	}
@@ -108,40 +108,40 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
 }

 // SaveTreeJSON stores a tree in the repository.
-func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) {
+func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) {
 	data, err := json.Marshal(item)
 	if err != nil {
-		return ID{}, errors.Wrap(err, "Marshal")
+		return restic.ID{}, errors.Wrap(err, "Marshal")
 	}
 	data = append(data, '\n')

 	// check if tree has been saved before
-	id := Hash(data)
-	if arch.isKnownBlob(id, pack.Tree) {
+	id := restic.Hash(data)
+	if arch.isKnownBlob(id, restic.TreeBlob) {
 		return id, nil
 	}

-	return arch.repo.SaveJSON(pack.Tree, item)
+	return arch.repo.SaveJSON(restic.TreeBlob, item)
 }

-func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) {
+func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
 	fi, err := file.Stat()
 	if err != nil {
-		return nil, errors.Wrap(err, "Stat")
+		return nil, errors.Wrap(err, "restic.Stat")
 	}

 	if fi.ModTime() == node.ModTime {
 		return node, nil
 	}

-	err = arch.Error(node.path, fi, errors.New("file has changed"))
+	err = arch.Error(node.Path, fi, errors.New("file has changed"))
 	if err != nil {
 		return nil, err
 	}

-	node, err = NodeFromFileInfo(node.path, fi)
+	node, err = restic.NodeFromFileInfo(node.Path, fi)
 	if err != nil {
-		debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
+		debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
 		return nil, err
 	}
@@ -149,21 +149,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
 }

 type saveResult struct {
-	id    ID
+	id    restic.ID
 	bytes uint64
 }

-func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
+func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
 	defer freeBuf(chunk.Data)

-	id := Hash(chunk.Data)
-	err := arch.Save(pack.Data, chunk.Data, id)
+	id := restic.Hash(chunk.Data)
+	err := arch.Save(restic.DataBlob, chunk.Data, id)
 	// TODO handle error
 	if err != nil {
 		panic(err)
 	}

-	p.Report(Stat{Bytes: uint64(chunk.Length)})
+	p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
 	arch.blobToken <- token
 	resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
 }
@@ -182,11 +182,11 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
 	return results, nil
 }

-func updateNodeContent(node *Node, results []saveResult) error {
-	debug.Log("Archiver.Save", "checking size for file %s", node.path)
+func updateNodeContent(node *restic.Node, results []saveResult) error {
+	debug.Log("Archiver.Save", "checking size for file %s", node.Path)

 	var bytes uint64
-	node.Content = make([]ID, len(results))
+	node.Content = make([]restic.ID, len(results))

 	for i, b := range results {
 		node.Content[i] = b.id
@@ -196,18 +196,18 @@ func updateNodeContent(node *restic.Node, results []saveResult) error {
 	}

 	if bytes != node.Size {
-		return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size)
+		return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size)
 	}

-	debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results))
+	debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results))

 	return nil
 }

 // SaveFile stores the content of the file on the backend as a Blob by calling
 // Save for each chunk.
-func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
-	file, err := fs.Open(node.path)
+func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error {
+	file, err := fs.Open(node.Path)
 	defer file.Close()
 	if err != nil {
 		return errors.Wrap(err, "Open")
@@ -218,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 		return err
 	}

-	chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial())
+	chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)

 	resultChannels := [](<-chan saveResult){}

 	for {
@@ -245,7 +245,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 	return err
 }

-func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
+func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
 	defer func() {
 		debug.Log("Archiver.fileWorker", "done")
 		wg.Done()
@@ -267,16 +267,16 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 				fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
 				// ignore this file
 				e.Result() <- nil
-				p.Report(Stat{Errors: 1})
+				p.Report(restic.Stat{Errors: 1})
 				continue
 			}

-			node, err := NodeFromFileInfo(e.Fullpath(), e.Info())
+			node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
 			if err != nil {
 				// TODO: integrate error reporting
-				debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err)
+				debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
 				e.Result() <- nil
-				p.Report(Stat{Errors: 1})
+				p.Report(restic.Stat{Errors: 1})
 				continue
 			}
@@ -284,12 +284,12 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 			if e.Node != nil {
 				debug.Log("Archiver.fileWorker", "   %v use old data", e.Path())

-				oldNode := e.Node.(*Node)
+				oldNode := e.Node.(*restic.Node)
 				// check if all content is still available in the repository
 				contentMissing := false
-				for _, blob := range oldNode.blobs {
-					if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil {
-						debug.Log("Archiver.fileWorker", "   %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
+				for _, blob := range oldNode.Content {
+					if !arch.repo.Index().Has(blob, restic.DataBlob) {
+						debug.Log("Archiver.fileWorker", "   %v not using old data, %v is missing", e.Path(), blob.Str())
 						contentMissing = true
 						break
 					}
@@ -297,7 +297,6 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st

 				if !contentMissing {
 					node.Content = oldNode.Content
-					node.blobs = oldNode.blobs
 					debug.Log("Archiver.fileWorker", "   %v content is complete", e.Path())
 				}
 			} else {
@@ -310,20 +309,20 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 				err = arch.SaveFile(p, node)
 				if err != nil {
 					// TODO: integrate error reporting
-					fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err)
+					fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
 					// ignore this file
 					e.Result() <- nil
-					p.Report(Stat{Errors: 1})
+					p.Report(restic.Stat{Errors: 1})
 					continue
 				}
 			} else {
 				// report old data size
-				p.Report(Stat{Bytes: node.Size})
+				p.Report(restic.Stat{Bytes: node.Size})
 			}

-			debug.Log("Archiver.fileWorker", "   processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs))
+			debug.Log("Archiver.fileWorker", "   processed %v, %d blobs", e.Path(), len(node.Content))
 			e.Result() <- node
-			p.Report(Stat{Files: 1})
+			p.Report(restic.Stat{Files: 1})
 		case <-done:
 			// pipeline was cancelled
 			return
@@ -331,7 +330,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 	}
 }

-func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
+func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
 	debug.Log("Archiver.dirWorker", "start")
 	defer func() {
 		debug.Log("Archiver.dirWorker", "done")
@@ -350,11 +349,11 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
 			if dir.Error() != nil {
 				fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
 				dir.Result() <- nil
-				p.Report(Stat{Errors: 1})
+				p.Report(restic.Stat{Errors: 1})
 				continue
 			}

-			tree := NewTree()
+			tree := restic.NewTree()

 			// wait for all content
 			for _, ch := range dir.Entries {
@@ -369,22 +368,22 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
 				}

 				// else insert node
-				node := res.(*Node)
+				node := res.(*restic.Node)
 				tree.Insert(node)

 				if node.FileType == "dir" {
-					debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree)
+					debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree)

 					if node.Subtree.IsNull() {
-						panic("invalid null subtree ID")
+						panic("invalid null subtree restic.ID")
 					}
 				}
 			}

-			node := &Node{}
+			node := &restic.Node{}

 			if dir.Path() != "" && dir.Info() != nil {
-				n, err := NodeFromFileInfo(dir.Path(), dir.Info())
+				n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info())
 				if err != nil {
 					n.Error = err.Error()
 					dir.Result() <- n
@@ -403,7 +402,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
 			}
 			debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
 			if id.IsNull() {
-				panic("invalid null subtree ID return from SaveTreeJSON()")
+				panic("invalid null subtree restic.ID return from SaveTreeJSON()")
 			}

 			node.Subtree = &id
@@ -412,7 +411,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
 			dir.Result() <- node
 			if dir.Path() != "" {
-				p.Report(Stat{Dirs: 1})
+				p.Report(restic.Stat{Dirs: 1})
 			}
 		case <-done:
 			// pipeline was cancelled
@@ -422,7 +421,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
 }

 type archivePipe struct {
-	Old <-chan WalkTreeJob
+	Old <-chan restic.WalkTreeJob
 	New <-chan pipe.Job
 }
@@ -457,7 +456,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
 type archiveJob struct {
 	hasOld bool
-	old    WalkTreeJob
+	old    restic.WalkTreeJob
 	new    pipe.Job
 }
@@ -471,7 +470,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
 	var (
 		loadOld, loadNew bool = true, true
 		ok               bool
-		oldJob           WalkTreeJob
+		oldJob           restic.WalkTreeJob
 		newJob           pipe.Job
 	)
@@ -565,7 +564,7 @@ func (j archiveJob) Copy() pipe.Job {
 	}

 	// if file is newer, return the new job
-	if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
+	if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
 		debug.Log("archiveJob.Copy", "   job %v is newer", j.new.Path())
 		return j.new
 	}
@@ -630,10 +629,10 @@ func (p baseNameSlice) Len() int { return len(p) }
 func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
 func (p baseNameSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

-// Snapshot creates a snapshot of the given paths. If parentID is set, this is
+// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is
 // used to compare the files to the ones archived at the time this snapshot was
 // taken.
-func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) {
+func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
 	paths = unique(paths)
 	sort.Sort(baseNameSlice(paths))
@@ -649,9 +648,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
 	defer p.Done()

 	// create new snapshot
-	sn, err := NewSnapshot(paths)
+	sn, err := restic.NewSnapshot(paths)
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}
 	sn.Excludes = arch.Excludes
@@ -662,18 +661,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
 		sn.Parent = parentID

 		// load parent snapshot
-		parent, err := LoadSnapshot(arch.repo, *parentID)
+		parent, err := restic.LoadSnapshot(arch.repo, *parentID)
 		if err != nil {
-			return nil, ID{}, err
+			return nil, restic.ID{}, err
 		}

 		// start walker on old tree
-		ch := make(chan WalkTreeJob)
-		go WalkTree(arch.repo, *parent.Tree, done, ch)
+		ch := make(chan restic.WalkTreeJob)
+		go restic.WalkTree(arch.repo, *parent.Tree, done, ch)
 		jobs.Old = ch
 	} else {
 		// use closed channel
-		ch := make(chan WalkTreeJob)
+		ch := make(chan restic.WalkTreeJob)
 		close(ch)
 		jobs.Old = ch
 	}
@@ -728,31 +727,29 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
 	debug.Log("Archiver.Snapshot", "workers terminated")

 	// receive the top-level tree
-	root := (<-resCh).(*Node)
+	root := (<-resCh).(*restic.Node)
 	debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
 	sn.Tree = root.Subtree

 	// save snapshot
-	id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn)
+	id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

-	// store ID in snapshot struct
-	sn.id = &id
 	debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())

 	// flush repository
 	err = arch.repo.Flush()
 	if err != nil {
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	// save index
 	err = arch.repo.SaveIndex()
 	if err != nil {
 		debug.Log("Archiver.Snapshot", "error saving index: %v", err)
-		return nil, ID{}, err
+		return nil, restic.ID{}, err
 	}

 	debug.Log("Archiver.Snapshot", "saved indexes")
@@ -768,13 +765,13 @@ func isRegularFile(fi os.FileInfo) bool {
 	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
 }

-// Scan traverses the dirs to collect Stat information while emitting progress
+// Scan traverses the dirs to collect restic.Stat information while emitting progress
 // information with p.
-func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
+func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
 	p.Start()
 	defer p.Done()

-	var stat Stat
+	var stat restic.Stat

 	for _, dir := range dirs {
 		debug.Log("Scan", "Start for %v", dir)
@@ -797,7 +794,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
 				return nil
 			}

-			s := Stat{}
+			s := restic.Stat{}
 			if fi.IsDir() {
 				s.Dirs++
 			} else {
@@ -817,7 +814,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
 		debug.Log("Scan", "Done for %v, err: %v", dir, err)
 		if err != nil {
-			return Stat{}, errors.Wrap(err, "fs.Walk")
+			return restic.Stat{}, errors.Wrap(err, "fs.Walk")
 		}
 	}
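At call sites the constructor rename reads as archiver.New. A hedged sketch of a caller (the import paths, nil progress, and nil parent ID are assumptions for illustration, not part of this commit):

package main

import (
	"log"

	"restic/archiver"
	"restic/repository"
)

func snapshotPaths(repo *repository.Repository, paths []string) {
	arch := archiver.New(repo) // formerly restic.NewArchiver(repo)

	// nil progress and nil parent ID: assumed-valid arguments for a first snapshot
	sn, id, err := arch.Snapshot(nil, paths, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created snapshot %v with tree %v", id.Str(), sn.Tree.Str())
}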

View File

@@ -1,4 +1,4 @@
-package restic_test
+package archiver_test

 import (
 	"crypto/rand"
@@ -103,13 +103,13 @@ func testArchiverDuplication(t *testing.T) {
 		id := randomID()

-		if repo.Index().Has(id, pack.Data) {
+		if repo.Index().Has(id, restic.DataBlob) {
 			continue
 		}

 		buf := make([]byte, 50)

-		err := arch.Save(pack.Data, buf, id)
+		err := arch.Save(restic.DataBlob, buf, id)
 		if err != nil {
 			t.Fatal(err)
 		}

View File

@@ -1,4 +1,4 @@
-package restic
+package archiver

 import (
 	"os"

View File

@@ -1,4 +1,4 @@
-package restic_test
+package archiver_test

 import (
 	"bytes"
@@ -146,9 +146,9 @@ func archiveWithDedup(t testing.TB) {
 	t.Logf("archived snapshot %v", sn.ID().Str())

 	// get archive stats
-	cnt.before.packs = repo.Count(backend.Data)
-	cnt.before.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.before.packs = repo.Count(restic.DataFile)
+	cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
@@ -157,9 +157,9 @@ func archiveWithDedup(t testing.TB) {
 	t.Logf("archived snapshot %v", sn2.ID().Str())

 	// get archive stats again
-	cnt.after.packs = repo.Count(backend.Data)
-	cnt.after.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.after.packs = repo.Count(restic.DataFile)
+	cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
@@ -174,9 +174,9 @@ func archiveWithDedup(t testing.TB) {
 	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())

 	// get archive stats again
-	cnt.after2.packs = repo.Count(backend.Data)
-	cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.after2.packs = repo.Count(restic.DataFile)
+	cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
@@ -210,7 +210,7 @@ func BenchmarkLoadTree(t *testing.B) {
 	for _, idx := range repo.Index().All() {
 		for blob := range idx.Each(done) {
-			if blob.Type != pack.Tree {
+			if blob.Type != restic.TreeBlob {
 				continue
 			}
@@ -267,7 +267,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
 			id := backend.Hash(c.Data)
 			time.Sleep(time.Duration(id[0]))
-			err := arch.Save(pack.Data, c.Data, id)
+			err := arch.Save(restic.DataBlob, c.Data, id)
 			<-barrier
 			errChan <- err
 		}(c, errChan)

View File

@@ -0,0 +1,21 @@
+package archiver
+
+import (
+	"sync"
+
+	"github.com/restic/chunker"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return make([]byte, chunker.MinSize)
+	},
+}
+
+func getBuf() []byte {
+	return bufPool.Get().([]byte)
+}
+
+func freeBuf(data []byte) {
+	bufPool.Put(data)
+}
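The new pool recycles chunk buffers of chunker.MinSize bytes. An illustrative round-trip inside package archiver (a sketch only; exampleBufReuse and scratch are hypothetical names, not part of the commit):

// exampleBufReuse shows the intended getBuf/freeBuf pairing.
func exampleBufReuse(scratch []byte) {
	buf := getBuf() // take a []byte of length chunker.MinSize from the pool
	n := copy(buf, scratch)
	_ = buf[:n] // ... use the buffer as scratch space ...
	freeBuf(buf) // return it so callers like saveChunk can reuse the allocation
}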

View File

@@ -1,6 +1,7 @@
 package backend_test

 import (
+	"restic"
 	"testing"

 	"restic/backend"
@@ -8,10 +9,10 @@ import (
 )

 type mockBackend struct {
-	list func(backend.Type, <-chan struct{}) <-chan string
+	list func(restic.FileType, <-chan struct{}) <-chan string
 }

-func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (m mockBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
 	return m.list(t, done)
 }
@@ -30,7 +31,7 @@ func TestPrefixLength(t *testing.T) {
 	list := samples

 	m := mockBackend{}
-	m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
+	m.list = func(t restic.FileType, done <-chan struct{}) <-chan string {
 		ch := make(chan string)
 		go func() {
 			defer close(ch)
@@ -45,17 +46,17 @@ func TestPrefixLength(t *testing.T) {
 		return ch
 	}

-	l, err := backend.PrefixLength(m, backend.Snapshot)
+	l, err := backend.PrefixLength(m, restic.SnapshotFile)
 	OK(t, err)
 	Equals(t, 19, l)

 	list = samples[:3]
-	l, err = backend.PrefixLength(m, backend.Snapshot)
+	l, err = backend.PrefixLength(m, restic.SnapshotFile)
 	OK(t, err)
 	Equals(t, 19, l)

 	list = samples[3:]
-	l, err = backend.PrefixLength(m, backend.Snapshot)
+	l, err = backend.PrefixLength(m, restic.SnapshotFile)
 	OK(t, err)
 	Equals(t, 8, l)
 }

View File

@@ -5,6 +5,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"restic"

 	"github.com/pkg/errors"
@@ -18,6 +19,8 @@ type Local struct {
 	p string
 }

+var _ restic.Backend = &Local{}
+
 func paths(dir string) []string {
 	return []string{
 		dir,
@@ -69,8 +72,8 @@ func (b *Local) Location() string {
 }

 // Construct path for given Type and name.
-func filename(base string, t backend.Type, name string) string {
-	if t == backend.Config {
+func filename(base string, t restic.FileType, name string) string {
+	if t == restic.ConfigFile {
 		return filepath.Join(base, "config")
 	}
@@ -78,21 +81,21 @@ func filename(base string, t backend.Type, name string) string {
 }

 // Construct directory for given Type.
-func dirname(base string, t backend.Type, name string) string {
+func dirname(base string, t restic.FileType, name string) string {
 	var n string
 	switch t {
-	case backend.Data:
+	case restic.DataFile:
 		n = backend.Paths.Data
 		if len(name) > 2 {
 			n = filepath.Join(n, name[:2])
 		}
-	case backend.Snapshot:
+	case restic.SnapshotFile:
 		n = backend.Paths.Snapshots
-	case backend.Index:
+	case restic.IndexFile:
 		n = backend.Paths.Index
-	case backend.Lock:
+	case restic.LockFile:
 		n = backend.Paths.Locks
-	case backend.Key:
+	case restic.KeyFile:
 		n = backend.Paths.Keys
 	}
 	return filepath.Join(base, n)
@@ -102,13 +105,13 @@ func dirname(base string, t backend.Type, name string) string {
 // saves it in p. Load has the same semantics as io.ReaderAt, with one
 // exception: when off is lower than zero, it is treated as an offset relative
 // to the end of the file.
-func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
 	debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
 	if err := h.Valid(); err != nil {
 		return 0, err
 	}

-	f, err := fs.Open(filename(b.p, h.Type, h.Name))
+	f, err := fs.Open(filename(b.p, h.FileType, h.Name))
 	if err != nil {
 		return 0, errors.Wrap(err, "Open")
 	}
@@ -168,7 +171,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
 }

 // Save stores data in the backend at the handle.
-func (b *Local) Save(h backend.Handle, p []byte) (err error) {
+func (b *Local) Save(h restic.Handle, p []byte) (err error) {
 	debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
 	if err := h.Valid(); err != nil {
 		return err
@@ -180,7 +183,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
 		return err
 	}

-	filename := filename(b.p, h.Type, h.Name)
+	filename := filename(b.p, h.FileType, h.Name)

 	// test if new path already exists
 	if _, err := fs.Stat(filename); err == nil {
@@ -188,7 +191,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
 	}

 	// create directories if necessary, ignore errors
-	if h.Type == backend.Data {
+	if h.FileType == restic.DataFile {
 		err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
 		if err != nil {
 			return errors.Wrap(err, "MkdirAll")
@@ -213,22 +216,22 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
 }

 // Stat returns information about a blob.
-func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
+func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
 	debug.Log("backend.local.Stat", "Stat %v", h)
 	if err := h.Valid(); err != nil {
-		return backend.BlobInfo{}, err
+		return restic.FileInfo{}, err
 	}

-	fi, err := fs.Stat(filename(b.p, h.Type, h.Name))
+	fi, err := fs.Stat(filename(b.p, h.FileType, h.Name))
 	if err != nil {
-		return backend.BlobInfo{}, errors.Wrap(err, "Stat")
+		return restic.FileInfo{}, errors.Wrap(err, "Stat")
 	}

-	return backend.BlobInfo{Size: fi.Size()}, nil
+	return restic.FileInfo{Size: fi.Size()}, nil
 }

 // Test returns true if a blob of the given type and name exists in the backend.
-func (b *Local) Test(t backend.Type, name string) (bool, error) {
+func (b *Local) Test(t restic.FileType, name string) (bool, error) {
 	debug.Log("backend.local.Test", "Test %v %v", t, name)
 	_, err := fs.Stat(filename(b.p, t, name))
 	if err != nil {
@@ -242,7 +245,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
 }

 // Remove removes the blob with the given name and type.
-func (b *Local) Remove(t backend.Type, name string) error {
+func (b *Local) Remove(t restic.FileType, name string) error {
 	debug.Log("backend.local.Remove", "Remove %v %v", t, name)
 	fn := filename(b.p, t, name)
@@ -317,10 +320,10 @@ func listDirs(dir string) (filenames []string, err error) {
 // List returns a channel that yields all names of blobs of type t. A
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
-func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
 	debug.Log("backend.local.List", "List %v", t)
 	lister := listDir
-	if t == backend.Data {
+	if t == restic.DataFile {
 		lister = listDirs
 	}
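For reference, the dirname switch above resolves each restic.FileType to a repository subdirectory via backend.Paths; the layout sketched here assumes the conventional restic path names, with only the two-character data-file fan-out actually visible in the diff:

	restic.DataFile     -> data/<first two characters of name>/
	restic.SnapshotFile -> snapshots/
	restic.IndexFile    -> index/
	restic.LockFile     -> locks/
	restic.KeyFile      -> keys/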

View File

@@ -2,23 +2,23 @@ package mem

 import (
 	"io"
+	"restic"
 	"sync"

 	"github.com/pkg/errors"

-	"restic/backend"
 	"restic/debug"
 )

 type entry struct {
-	Type backend.Type
+	Type restic.FileType
 	Name string
 }

 type memMap map[entry][]byte

 // make sure that MemoryBackend implements backend.Backend
-var _ backend.Backend = &MemoryBackend{}
+var _ restic.Backend = &MemoryBackend{}

 // MemoryBackend is a mock backend that uses a map for storing all data in
 // memory. This should only be used for tests.
@@ -39,7 +39,7 @@ func New() *MemoryBackend {
 }

 // Test returns whether a file exists.
-func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) {
+func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
 	be.m.Lock()
 	defer be.m.Unlock()
@@ -53,7 +53,7 @@ func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) {
 }

 // Load reads data from the backend.
-func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error) {
+func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
 	if err := h.Valid(); err != nil {
 		return 0, err
 	}
@@ -61,17 +61,17 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error
 	be.m.Lock()
 	defer be.m.Unlock()

-	if h.Type == backend.Config {
+	if h.FileType == restic.ConfigFile {
 		h.Name = ""
 	}

 	debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))

-	if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
+	if _, ok := be.data[entry{h.FileType, h.Name}]; !ok {
 		return 0, errors.New("no such data")
 	}

-	buf := be.data[entry{h.Type, h.Name}]
+	buf := be.data[entry{h.FileType, h.Name}]
 	switch {
 	case off > int64(len(buf)):
 		return 0, errors.New("offset beyond end of file")
@@ -93,7 +93,7 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error
 }

 // Save adds new Data to the backend.
-func (be *MemoryBackend) Save(h backend.Handle, p []byte) error {
+func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
 	if err := h.Valid(); err != nil {
 		return err
 	}
@@ -101,47 +101,47 @@ func (be *MemoryBackend) Save(h backend.Handle, p []byte) error {
 	be.m.Lock()
 	defer be.m.Unlock()

-	if h.Type == backend.Config {
+	if h.FileType == restic.ConfigFile {
 		h.Name = ""
 	}

-	if _, ok := be.data[entry{h.Type, h.Name}]; ok {
+	if _, ok := be.data[entry{h.FileType, h.Name}]; ok {
 		return errors.New("file already exists")
 	}

 	debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)
 	buf := make([]byte, len(p))
 	copy(buf, p)
-	be.data[entry{h.Type, h.Name}] = buf
+	be.data[entry{h.FileType, h.Name}] = buf

 	return nil
 }

 // Stat returns information about a file in the backend.
-func (be *MemoryBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
+func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
 	be.m.Lock()
 	defer be.m.Unlock()

 	if err := h.Valid(); err != nil {
-		return backend.BlobInfo{}, err
+		return restic.FileInfo{}, err
 	}

-	if h.Type == backend.Config {
+	if h.FileType == restic.ConfigFile {
 		h.Name = ""
 	}

 	debug.Log("MemoryBackend.Stat", "stat %v", h)

-	e, ok := be.data[entry{h.Type, h.Name}]
+	e, ok := be.data[entry{h.FileType, h.Name}]
 	if !ok {
-		return backend.BlobInfo{}, errors.New("no such data")
+		return restic.FileInfo{}, errors.New("no such data")
 	}

-	return backend.BlobInfo{Size: int64(len(e))}, nil
+	return restic.FileInfo{Size: int64(len(e))}, nil
 }

 // Remove deletes a file from the backend.
-func (be *MemoryBackend) Remove(t backend.Type, name string) error {
+func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
 	be.m.Lock()
 	defer be.m.Unlock()
@@ -157,7 +157,7 @@ func (be *MemoryBackend) Remove(t backend.Type, name string) error {
 }

 // List returns a channel which yields entries from the backend.
-func (be *MemoryBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
 	be.m.Lock()
 	defer be.m.Unlock()
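A compact, test-style sketch of the renamed handle field against the in-memory backend. The import paths and the test name are assumptions; only New, Save, Load, and the restic.Handle shape come from this diff:

package mem_test

import (
	"testing"

	"restic"
	"restic/backend/mem"
)

func TestHandleFieldRenameSketch(t *testing.T) {
	be := mem.New()

	// was: backend.Handle{Type: backend.Data, Name: "aa11"}
	h := restic.Handle{FileType: restic.DataFile, Name: "aa11"}

	if err := be.Save(h, []byte("payload")); err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 7) // "payload" is 7 bytes
	if _, err := be.Load(h, buf, 0); err != nil {
		t.Fatal(err)
	}
}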

View File

@@ -8,6 +8,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"restic"
 	"strings"

 	"github.com/pkg/errors"
@@ -18,27 +19,27 @@ import (
 const connLimit = 10

 // restPath returns the path to the given resource.
-func restPath(url *url.URL, h backend.Handle) string {
+func restPath(url *url.URL, h restic.Handle) string {
 	u := *url

 	var dir string

-	switch h.Type {
-	case backend.Config:
+	switch h.FileType {
+	case restic.ConfigFile:
 		dir = ""
 		h.Name = "config"
-	case backend.Data:
+	case restic.DataFile:
 		dir = backend.Paths.Data
-	case backend.Snapshot:
+	case restic.SnapshotFile:
 		dir = backend.Paths.Snapshots
-	case backend.Index:
+	case restic.IndexFile:
 		dir = backend.Paths.Index
-	case backend.Lock:
+	case restic.LockFile:
 		dir = backend.Paths.Locks
-	case backend.Key:
+	case restic.KeyFile:
 		dir = backend.Paths.Keys
 	default:
-		dir = string(h.Type)
+		dir = string(h.FileType)
 	}

 	u.Path = path.Join(url.Path, dir, h.Name)
@@ -71,7 +72,7 @@ func (b *restBackend) Location() string {

 // Load returns the data stored in the backend for h at the given offset
 // and saves it in p. Load has the same semantics as io.ReaderAt.
-func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
 	if err := h.Valid(); err != nil {
 		return 0, err
 	}
@@ -120,7 +121,7 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er
 }

 // Save stores data in the backend at the handle.
-func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
+func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
 	if err := h.Valid(); err != nil {
 		return err
 	}
@@ -151,7 +152,7 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
 }

 // Stat returns information about a blob.
-func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
+func (b *restBackend) Stat(h restic.Handle) (backend.BlobInfo, error) {
 	if err := h.Valid(); err != nil {
 		return backend.BlobInfo{}, err
 	}
@@ -183,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
 }

 // Test returns true if a blob of the given type and name exists in the backend.
-func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
-	_, err := b.Stat(backend.Handle{Type: t, Name: name})
+func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
+	_, err := b.Stat(restic.Handle{FileType: t, Name: name})
 	if err != nil {
 		return false, nil
 	}
@@ -193,8 +194,8 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
 }

 // Remove removes the blob with the given name and type.
-func (b *restBackend) Remove(t backend.Type, name string) error {
-	h := backend.Handle{Type: t, Name: name}
+func (b *restBackend) Remove(t restic.FileType, name string) error {
+	h := restic.Handle{FileType: t, Name: name}
 	if err := h.Valid(); err != nil {
 		return err
 	}
@@ -221,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error {
 // List returns a channel that yields all names of blobs of type t. A
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
-func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
 	ch := make(chan string)

-	url := restPath(b.url, backend.Handle{Type: t})
+	url := restPath(b.url, restic.Handle{FileType: t})
 	if !strings.HasSuffix(url, "/") {
 		url += "/"
 	}

View File

@@ -2,36 +2,36 @@ package rest

 import (
 	"net/url"
-	"restic/backend"
+	"restic"
 	"testing"
 )

 var restPathTests = []struct {
-	Handle backend.Handle
+	Handle restic.Handle
 	URL    *url.URL
 	Result string
 }{
 	{
 		URL: parseURL("https://hostname.foo"),
-		Handle: backend.Handle{
-			Type: backend.Data,
+		Handle: restic.Handle{
+			FileType: restic.DataFile,
 			Name: "foobar",
 		},
 		Result: "https://hostname.foo/data/foobar",
 	},
 	{
 		URL: parseURL("https://hostname.foo:1234/prefix/repo"),
-		Handle: backend.Handle{
-			Type: backend.Lock,
+		Handle: restic.Handle{
+			FileType: restic.LockFile,
 			Name: "foobar",
 		},
 		Result: "https://hostname.foo:1234/prefix/repo/locks/foobar",
 	},
 	{
 		URL: parseURL("https://hostname.foo:1234/prefix/repo"),
-		Handle: backend.Handle{
-			Type: backend.Config,
+		Handle: restic.Handle{
+			FileType: restic.ConfigFile,
 			Name: "foobar",
 		},
 		Result: "https://hostname.foo:1234/prefix/repo/config",
 	},

View File

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net/url"
 	"os"
+	"restic"

 	"github.com/pkg/errors"
@@ -37,7 +38,7 @@ func init() {
 		return nil, err
 	}

-	exists, err := be.Test(backend.Config, "")
+	exists, err := be.Test(restic.ConfigFile, "")
 	if err != nil {
 		return nil, err
 	}

View File

@ -3,13 +3,13 @@ package s3
import ( import (
"bytes" "bytes"
"io" "io"
"restic"
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/minio/minio-go" "github.com/minio/minio-go"
"restic/backend"
"restic/debug" "restic/debug"
) )
@ -25,7 +25,7 @@ type s3 struct {
// Open opens the S3 backend at bucket and region. The bucket is created if it // Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet. // does not exist yet.
func Open(cfg Config) (backend.Backend, error) { func Open(cfg Config) (restic.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg) debug.Log("s3.Open", "open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP) client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
@ -53,7 +53,7 @@ func Open(cfg Config) (backend.Backend, error) {
return be, nil return be, nil
} }
func (be *s3) s3path(t backend.Type, name string) string { func (be *s3) s3path(t restic.FileType, name string) string {
var path string var path string
if be.prefix != "" { if be.prefix != "" {
@ -61,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string {
} }
path += string(t) path += string(t)
if t == backend.Config { if t == restic.ConfigFile {
return path return path
} }
return path + "/" + name return path + "/" + name
@ -81,11 +81,11 @@ func (be *s3) Location() string {
// Load returns the data stored in the backend for h at the given offset // Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
var obj *minio.Object var obj *minio.Object
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
path := be.s3path(h.Type, h.Name) path := be.s3path(h.FileType, h.Name)
<-be.connChan <-be.connChan
defer func() { defer func() {
@ -153,14 +153,14 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) { func (be s3) Save(h restic.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
} }
debug.Log("s3.Save", "%v with %d bytes", h, len(p)) debug.Log("s3.Save", "%v with %d bytes", h, len(p))
path := be.s3path(h.Type, h.Name) path := be.s3path(h.FileType, h.Name)
// Check key does not already exist // Check key does not already exist
_, err = be.client.StatObject(be.bucketname, path) _, err = be.client.StatObject(be.bucketname, path)
@ -183,16 +183,16 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("s3.Stat", "%v", h) debug.Log("s3.Stat", "%v", h)
path := be.s3path(h.Type, h.Name) path := be.s3path(h.FileType, h.Name)
var obj *minio.Object var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, path) obj, err = be.client.GetObject(be.bucketname, path)
if err != nil { if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err) debug.Log("s3.Stat", "GetObject() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "client.GetObject") return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
} }
// make sure that the object is closed properly. // make sure that the object is closed properly.
@ -206,14 +206,14 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) {
fi, err := obj.Stat() fi, err := obj.Stat()
if err != nil { if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err) debug.Log("s3.Stat", "Stat() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "Stat") return restic.FileInfo{}, errors.Wrap(err, "Stat")
} }
return backend.BlobInfo{Size: fi.Size}, nil return restic.FileInfo{Size: fi.Size}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t backend.Type, name string) (bool, error) { func (be *s3) Test(t restic.FileType, name string) (bool, error) {
found := false found := false
path := be.s3path(t, name) path := be.s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path) _, err := be.client.StatObject(be.bucketname, path)
@ -226,7 +226,7 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (be *s3) Remove(t backend.Type, name string) error { func (be *s3) Remove(t restic.FileType, name string) error {
path := be.s3path(t, name) path := be.s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path) err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
@ -236,7 +236,7 @@ func (be *s3) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t) debug.Log("s3.List", "listing %v", t)
ch := make(chan string) ch := make(chan string)
@ -264,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
} }
// Remove keys for a specified backend type. // Remove keys for a specified backend type.
func (be *s3) removeKeys(t backend.Type) error { func (be *s3) removeKeys(t restic.FileType) error {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for key := range be.List(backend.Data, done) { for key := range be.List(t, done) {
err := be.Remove(backend.Data, key) err := be.Remove(t, key)
if err != nil { if err != nil {
return err return err
} }
@ -279,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error {
// Delete removes all restic keys in the bucket. It will not remove the bucket itself. // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error { func (be *s3) Delete() error {
alltypes := []backend.Type{ alltypes := []restic.FileType{
backend.Data, restic.DataFile,
backend.Key, restic.KeyFile,
backend.Lock, restic.LockFile,
backend.Snapshot, restic.SnapshotFile,
backend.Index} restic.IndexFile}
for _, t := range alltypes { for _, t := range alltypes {
err := be.removeKeys(t) err := be.removeKeys(t)
@ -293,7 +293,7 @@ func (be *s3) Delete() error {
} }
} }
return be.Remove(backend.Config, "") return be.Remove(restic.ConfigFile, "")
} }
// Close does nothing // Close does nothing
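For orientation, a minimal sketch of how a caller might drain this channel-based List API; the helper name and the backend value are assumptions, only the List signature and the done-channel contract come from the code above.

func listDataFiles(be restic.Backend) []string {
	// Closing done tells the listing goroutine to stop sending.
	done := make(chan struct{})
	defer close(done)

	var names []string
	for name := range be.List(restic.DataFile, done) {
		names = append(names, name)
	}
	return names
}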

View File

@ -44,7 +44,7 @@ func init() {
return nil, err return nil, err
} }
exists, err := be.Test(backend.Config, "") exists, err := be.Test(restic.ConfigFile, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -9,6 +9,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"restic"
"strings" "strings"
"time" "time"
@ -256,11 +257,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
} }
// Rename temp file to final name according to type and name. // Rename temp file to final name according to type and name.
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error { func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
filename := r.filename(t, name) filename := r.filename(t, name)
// create directories if necessary // create directories if necessary
if t == backend.Data { if t == restic.DataFile {
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir) err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
if err != nil { if err != nil {
return err return err
@ -293,9 +294,9 @@ func Join(parts ...string) string {
return path.Clean(path.Join(parts...)) return path.Clean(path.Join(parts...))
} }
// Construct path for given backend.Type and name. // Construct path for given restic.FileType and name.
func (r *SFTP) filename(t backend.Type, name string) string { func (r *SFTP) filename(t restic.FileType, name string) string {
if t == backend.Config { if t == restic.ConfigFile {
return Join(r.p, "config") return Join(r.p, "config")
} }
@ -303,21 +304,21 @@ func (r *SFTP) filename(t backend.Type, name string) string {
} }
// Construct directory for given backend.Type. // Construct directory for given restic.FileType.
func (r *SFTP) dirname(t backend.Type, name string) string { func (r *SFTP) dirname(t restic.FileType, name string) string {
var n string var n string
switch t { switch t {
case backend.Data: case restic.DataFile:
n = backend.Paths.Data n = backend.Paths.Data
if len(name) > 2 { if len(name) > 2 {
n = Join(n, name[:2]) n = Join(n, name[:2])
} }
case backend.Snapshot: case restic.SnapshotFile:
n = backend.Paths.Snapshots n = backend.Paths.Snapshots
case backend.Index: case restic.IndexFile:
n = backend.Paths.Index n = backend.Paths.Index
case backend.Lock: case restic.LockFile:
n = backend.Paths.Locks n = backend.Paths.Locks
case backend.Key: case restic.KeyFile:
n = backend.Paths.Keys n = backend.Paths.Keys
} }
return Join(r.p, n) return Join(r.p, n)
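To make the resulting directory layout concrete, a small self-contained sketch of the two-character fan-out rule for data files; the literal "data" stands in for backend.Paths.Data and is an assumption, the prefix split comes from the case above.

package main

import (
	"fmt"
	"path"
)

// dataDir mirrors the fan-out in dirname above: data files are spread over
// up to 256 subdirectories keyed by the first two characters of their name.
// The subdirectory name "data" is an assumption for backend.Paths.Data.
func dataDir(repo, name string) string {
	n := "data"
	if len(name) > 2 {
		n = path.Join(n, name[:2])
	}
	return path.Join(repo, n)
}

func main() {
	fmt.Println(dataDir("/srv/repo", "abcdef012345")) // /srv/repo/data/ab
}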
@ -325,7 +326,7 @@ func (r *SFTP) dirname(t backend.Type, name string) string {
// Load returns the data stored in the backend for h at the given offset // Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off) debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return 0, err return 0, err
@ -335,7 +336,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
return 0, err return 0, err
} }
f, err := r.c.Open(r.filename(h.Type, h.Name)) f, err := r.c.Open(r.filename(h.FileType, h.Name))
if err != nil { if err != nil {
return 0, errors.Wrap(err, "Open") return 0, errors.Wrap(err, "Open")
} }
@ -362,7 +363,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
debug.Log("sftp.Save", "save %v bytes to %v", h, len(p)) debug.Log("sftp.Save", "save %v bytes to %v", h, len(p))
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return err return err
@ -393,14 +394,14 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
return errors.Wrap(err, "Close") return errors.Wrap(err, "Close")
} }
err = r.renameFile(filename, h.Type, h.Name) err = r.renameFile(filename, h.FileType, h.Name)
debug.Log("sftp.Save", "save %v: rename %v: %v", debug.Log("sftp.Save", "save %v: rename %v: %v",
h, path.Base(filename), err) h, path.Base(filename), err)
return err return err
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { func (r *SFTP) Stat(h restic.Handle) (backend.BlobInfo, error) {
debug.Log("sftp.Stat", "stat %v", h) debug.Log("sftp.Stat", "stat %v", h)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return backend.BlobInfo{}, err return backend.BlobInfo{}, err
@ -410,7 +411,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
return backend.BlobInfo{}, err return backend.BlobInfo{}, err
} }
fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) fi, err := r.c.Lstat(r.filename(h.FileType, h.Name))
if err != nil { if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Lstat") return backend.BlobInfo{}, errors.Wrap(err, "Lstat")
} }
@ -419,7 +420,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) { func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
debug.Log("sftp.Test", "type %v, name %v", t, name) debug.Log("sftp.Test", "type %v, name %v", t, name)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return false, err return false, err
@ -438,7 +439,7 @@ func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the content stored at name. // Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error { func (r *SFTP) Remove(t restic.FileType, name string) error {
debug.Log("sftp.Remove", "type %v, name %v", t, name) debug.Log("sftp.Remove", "type %v, name %v", t, name)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return err return err
@ -450,14 +451,14 @@ func (r *SFTP) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("sftp.List", "list all %v", t) debug.Log("sftp.List", "list all %v", t)
ch := make(chan string) ch := make(chan string)
go func() { go func() {
defer close(ch) defer close(ch)
if t == backend.Data { if t == restic.DataFile {
// read first level // read first level
basedir := r.dirname(t, "") basedir := r.dirname(t, "")

View File

@ -7,6 +7,7 @@ import (
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"reflect" "reflect"
"restic"
"sort" "sort"
"testing" "testing"
@ -118,7 +119,7 @@ func TestCreateWithConfig(t testing.TB) {
defer close(t) defer close(t)
// save a config // save a config
store(t, b, backend.Config, []byte("test config")) store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail // now create the backend again, this must fail
_, err := CreateFn() _, err := CreateFn()
@ -127,7 +128,7 @@ func TestCreateWithConfig(t testing.TB) {
} }
// remove config // remove config
err = b.Remove(backend.Config, "") err = b.Remove(restic.ConfigFile, "")
if err != nil { if err != nil {
t.Fatalf("unexpected error removing config: %v", err) t.Fatalf("unexpected error removing config: %v", err)
} }
@ -152,12 +153,12 @@ func TestConfig(t testing.TB) {
var testString = "Config" var testString = "Config"
// create config and read it back // create config and read it back
_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil)
if err == nil { if err == nil {
t.Fatalf("did not get expected error for non-existing config") t.Fatalf("did not get expected error for non-existing config")
} }
err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString)) err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString))
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
} }
@ -165,7 +166,7 @@ func TestConfig(t testing.TB) {
// try accessing the config with different names, should all return the // try accessing the config with different names, should all return the
// same config // same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.Config, Name: name} h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(b, h, nil) buf, err := backend.LoadAll(b, h, nil)
if err != nil { if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err) t.Fatalf("unable to read config with name %q: %v", name, err)
@ -182,12 +183,12 @@ func TestLoad(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
_, err := b.Load(backend.Handle{}, nil, 0) _, err := b.Load(restic.Handle{}, nil, 0)
if err == nil { if err == nil {
t.Fatalf("Load() did not return an error for invalid handle") t.Fatalf("Load() did not return an error for invalid handle")
} }
_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0) _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
if err == nil { if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob") t.Fatalf("Load() did not return an error for non-existing blob")
} }
@ -197,7 +198,7 @@ func TestLoad(t testing.TB) {
data := Random(23, length) data := Random(23, length)
id := backend.Hash(data) id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(handle, data) err = b.Save(handle, data)
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
@ -309,7 +310,7 @@ func TestLoad(t testing.TB) {
t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err) t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
} }
OK(t, b.Remove(backend.Data, id.String())) OK(t, b.Remove(restic.DataFile, id.String()))
} }
// TestLoadNegativeOffset tests the backend's Load function with negative offsets. // TestLoadNegativeOffset tests the backend's Load function with negative offsets.
@ -322,7 +323,7 @@ func TestLoadNegativeOffset(t testing.TB) {
data := Random(23, length) data := Random(23, length)
id := backend.Hash(data) id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err := b.Save(handle, data) err := b.Save(handle, data)
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
@ -365,7 +366,7 @@ func TestLoadNegativeOffset(t testing.TB) {
} }
OK(t, b.Remove(backend.Data, id.String())) OK(t, b.Remove(restic.DataFile, id.String()))
} }
// TestSave tests saving data in the backend. // TestSave tests saving data in the backend.
@ -380,8 +381,8 @@ func TestSave(t testing.TB) {
// use the first 32 byte as the ID // use the first 32 byte as the ID
copy(id[:], data) copy(id[:], data)
h := backend.Handle{ h := restic.Handle{
Type: backend.Data, Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i), Name: fmt.Sprintf("%s-%d", id, i),
} }
err := b.Save(h, data) err := b.Save(h, data)
@ -429,7 +430,7 @@ func TestSaveFilenames(t testing.TB) {
defer close(t) defer close(t)
for i, test := range filenameTests { for i, test := range filenameTests {
h := backend.Handle{Name: test.name, Type: backend.Data} h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(h, []byte(test.data)) err := b.Save(h, []byte(test.data))
if err != nil { if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err) t.Errorf("test %d failed: Save() returned %v", i, err)
@ -464,9 +465,9 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
} }
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { func store(t testing.TB, b backend.Backend, tpe restic.FileType, data []byte) {
id := backend.Hash(data) id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data) err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data)
OK(t, err) OK(t, err)
} }
@ -483,9 +484,9 @@ func TestBackend(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
for _, tpe := range []backend.Type{ for _, tpe := range []restic.FileType{
backend.Data, backend.Key, backend.Lock, restic.DataFile, restic.KeyFile, restic.LockFile,
backend.Snapshot, backend.Index, restic.SnapshotFile, restic.IndexFile,
} { } {
// detect non-existing files // detect non-existing files
for _, test := range testStrings { for _, test := range testStrings {
@ -498,7 +499,7 @@ func TestBackend(t testing.TB) {
Assert(t, !ret, "blob was found to exist before creating") Assert(t, !ret, "blob was found to exist before creating")
// try to stat a not existing blob // try to stat a not existing blob
h := backend.Handle{Type: tpe, Name: id.String()} h := restic.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h) _, err = b.Stat(h)
Assert(t, err != nil, "blob data could be extracted before creation") Assert(t, err != nil, "blob data could be extracted before creation")
@ -517,7 +518,7 @@ func TestBackend(t testing.TB) {
store(t, b, tpe, []byte(test.data)) store(t, b, tpe, []byte(test.data))
// test Load() // test Load()
h := backend.Handle{Type: tpe, Name: test.id} h := restic.Handle{Type: tpe, Name: test.id}
buf, err := backend.LoadAll(b, h, nil) buf, err := backend.LoadAll(b, h, nil)
OK(t, err) OK(t, err)
Equals(t, test.data, string(buf)) Equals(t, test.data, string(buf))
@ -538,7 +539,7 @@ func TestBackend(t testing.TB) {
test := testStrings[0] test := testStrings[0]
// create blob // create blob
err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data))
Assert(t, err != nil, "expected error, got %v", err) Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate // remove and recreate
@ -551,7 +552,7 @@ func TestBackend(t testing.TB) {
Assert(t, ok == false, "removed blob still present") Assert(t, ok == false, "removed blob still present")
// create blob // create blob
err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data))
OK(t, err) OK(t, err)
// list items // list items

View File

@ -2,6 +2,7 @@ package backend
import ( import (
"io" "io"
"restic"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -10,7 +11,7 @@ import (
// is resized to accommodate all data in the blob. Errors returned by be.Load() // is resized to accommodate all data in the blob. Errors returned by be.Load()
// are passed on, except io.ErrUnexpectedEOF is silenced and nil returned // are passed on, except io.ErrUnexpectedEOF is silenced and nil returned
// instead, since it means this function is working properly. // instead, since it means this function is working properly.
func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) { func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
fi, err := be.Stat(h) fi, err := be.Stat(h)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Stat") return nil, errors.Wrap(err, "Stat")
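A short usage sketch for LoadAll with buffer reuse; the helper and its arguments are hypothetical, while the resizing of a too-small buffer and the silencing of io.ErrUnexpectedEOF follow from the comment above.

// loadFull is a hypothetical call site: it fetches one index file in full,
// reusing the caller's buffer across calls to avoid allocations.
func loadFull(be restic.Backend, id restic.ID, buf []byte) ([]byte, error) {
	h := restic.Handle{Type: restic.IndexFile, Name: id.String()}
	// LoadAll grows buf as needed and returns the complete file contents.
	return LoadAll(be, h, buf)
}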

View File

@ -20,10 +20,10 @@ func TestLoadAll(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil) buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {
@ -45,11 +45,11 @@ func TestLoadSmallBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf := make([]byte, len(data)-23) buf := make([]byte, len(data)-23)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {
@ -71,11 +71,11 @@ func TestLoadLargeBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf := make([]byte, len(data)+100) buf := make([]byte, len(data)+100)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {

View File

@ -0,0 +1,70 @@
package restic
import "github.com/pkg/errors"
// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
// could be found.
var ErrNoIDPrefixFound = errors.New("no ID found")
// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given
// prefix are found.
var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")
// Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, an empty string and ErrNoIDPrefixFound
// are returned. If more than one is found, an empty string and
// ErrMultipleIDMatches are returned.
func Find(be Lister, t FileType, prefix string) (string, error) {
done := make(chan struct{})
defer close(done)
match := ""
// TODO: optimize by sorting list etc.
for name := range be.List(t, done) {
if len(name) >= len(prefix) && prefix == name[:len(prefix)] {
if match == "" {
match = name
} else {
return "", ErrMultipleIDMatches
}
}
}
if match != "" {
return match, nil
}
return "", ErrNoIDPrefixFound
}
const minPrefixLength = 8
// PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique.
func PrefixLength(be Lister, t FileType) (int, error) {
done := make(chan struct{})
defer close(done)
// load all IDs of the given type
list := make([]string, 0, 100)
for name := range be.List(t, done) {
list = append(list, name)
}
// select prefixes of length l, test if the last one is the same as the current one
outer:
for l := minPrefixLength; l < IDSize; l++ {
var last string
for _, name := range list {
if last == name[:l] {
continue outer
}
last = name[:l]
}
return l, nil
}
return IDSize, nil
}
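Because Lister is a single-method interface, Find can be exercised without a real backend; the adapter below is hypothetical, while Find, Lister, and the two error values come from this file.

// listerFunc adapts an ordinary function to the Lister interface.
type listerFunc func(t FileType, done <-chan struct{}) <-chan string

func (f listerFunc) List(t FileType, done <-chan struct{}) <-chan string {
	return f(t, done)
}

// fixedNames fakes a backend listing from a slice, honoring done.
func fixedNames(names []string) listerFunc {
	return func(_ FileType, done <-chan struct{}) <-chan string {
		ch := make(chan string)
		go func() {
			defer close(ch)
			for _, name := range names {
				select {
				case ch <- name:
				case <-done:
					return
				}
			}
		}()
		return ch
	}
}

// Find(fixedNames([]string{"aa11", "bb22"}), SnapshotFile, "aa") yields "aa11";
// two names starting with "aa" would yield ErrMultipleIDMatches, and none at
// all ErrNoIDPrefixFound.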

View File

@ -21,14 +21,14 @@ import (
// A Checker only tests for internal errors within the data structures of the // A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on. // repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct { type Checker struct {
packs backend.IDSet packs restic.IDSet
blobs backend.IDSet blobs restic.IDSet
blobRefs struct { blobRefs struct {
sync.Mutex sync.Mutex
M map[backend.ID]uint M map[restic.ID]uint
} }
indexes map[backend.ID]*repository.Index indexes map[restic.ID]*repository.Index
orphanedPacks backend.IDs orphanedPacks restic.IDs
masterIndex *repository.MasterIndex masterIndex *repository.MasterIndex
@ -38,14 +38,14 @@ type Checker struct {
// New returns a new checker which runs on repo. // New returns a new checker which runs on repo.
func New(repo *repository.Repository) *Checker { func New(repo *repository.Repository) *Checker {
c := &Checker{ c := &Checker{
packs: backend.NewIDSet(), packs: restic.NewIDSet(),
blobs: backend.NewIDSet(), blobs: restic.NewIDSet(),
masterIndex: repository.NewMasterIndex(), masterIndex: repository.NewMasterIndex(),
indexes: make(map[backend.ID]*repository.Index), indexes: make(map[restic.ID]*repository.Index),
repo: repo, repo: repo,
} }
c.blobRefs.M = make(map[backend.ID]uint) c.blobRefs.M = make(map[restic.ID]uint)
return c return c
} }
@ -54,8 +54,8 @@ const defaultParallelism = 40
// ErrDuplicatePacks is returned when a pack is found in more than one index. // ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct { type ErrDuplicatePacks struct {
PackID backend.ID PackID restic.ID
Indexes backend.IDSet Indexes restic.IDSet
} }
func (e ErrDuplicatePacks) Error() string { func (e ErrDuplicatePacks) Error() string {
@ -65,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string {
// ErrOldIndexFormat is returned when an index with the old format is // ErrOldIndexFormat is returned when an index with the old format is
// found. // found.
type ErrOldIndexFormat struct { type ErrOldIndexFormat struct {
backend.ID restic.ID
} }
func (err ErrOldIndexFormat) Error() string { func (err ErrOldIndexFormat) Error() string {
@ -82,7 +82,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
indexCh := make(chan indexRes) indexCh := make(chan indexRes)
worker := func(id backend.ID, done <-chan struct{}) error { worker := func(id restic.ID, done <-chan struct{}) error {
debug.Log("LoadIndex", "worker got index %v", id) debug.Log("LoadIndex", "worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex) idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
if errors.Cause(err) == repository.ErrOldIndexFormat { if errors.Cause(err) == repository.ErrOldIndexFormat {
@ -108,7 +108,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
go func() { go func() {
defer close(indexCh) defer close(indexCh)
debug.Log("LoadIndex", "start loading indexes in parallel") debug.Log("LoadIndex", "start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker)) repository.ParallelWorkFuncParseID(worker))
debug.Log("LoadIndex", "loading indexes finished, error: %v", perr) debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
}() }()
@ -121,11 +121,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
return hints, errs return hints, errs
} }
packToIndex := make(map[backend.ID]backend.IDSet) packToIndex := make(map[restic.ID]restic.IDSet)
for res := range indexCh { for res := range indexCh {
debug.Log("LoadIndex", "process index %v", res.ID) debug.Log("LoadIndex", "process index %v", res.ID)
idxID, err := backend.ParseID(res.ID) idxID, err := restic.ParseID(res.ID)
if err != nil { if err != nil {
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID)) errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
continue continue
@ -143,7 +143,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
cnt++ cnt++
if _, ok := packToIndex[blob.PackID]; !ok { if _, ok := packToIndex[blob.PackID]; !ok {
packToIndex[blob.PackID] = backend.NewIDSet() packToIndex[blob.PackID] = restic.NewIDSet()
} }
packToIndex[blob.PackID].Insert(idxID) packToIndex[blob.PackID].Insert(idxID)
} }
@ -171,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
// PackError describes an error with a specific pack. // PackError describes an error with a specific pack.
type PackError struct { type PackError struct {
ID backend.ID ID restic.ID
Orphaned bool Orphaned bool
Err error Err error
} }
@ -180,14 +180,14 @@ func (e PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error() return "pack " + e.ID.String() + ": " + e.Err.Error()
} }
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { func packIDTester(repo *repository.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("Checker.testPackID", "worker start") debug.Log("Checker.testPackID", "worker start")
defer debug.Log("Checker.testPackID", "worker done") defer debug.Log("Checker.testPackID", "worker done")
defer wg.Done() defer wg.Done()
for id := range inChan { for id := range inChan {
ok, err := repo.Backend().Test(backend.Data, id.String()) ok, err := repo.Backend().Test(restic.DataFile, id.String())
if err != nil { if err != nil {
err = PackError{ID: id, Err: err} err = PackError{ID: id, Err: err}
} else { } else {
@ -218,11 +218,11 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
defer close(errChan) defer close(errChan)
debug.Log("Checker.Packs", "checking for %d packs", len(c.packs)) debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
seenPacks := backend.NewIDSet() seenPacks := restic.NewIDSet()
var workerWG sync.WaitGroup var workerWG sync.WaitGroup
IDChan := make(chan backend.ID) IDChan := make(chan restic.ID)
for i := 0; i < defaultParallelism; i++ { for i := 0; i < defaultParallelism; i++ {
workerWG.Add(1) workerWG.Add(1)
go packIDTester(c.repo, IDChan, errChan, &workerWG, done) go packIDTester(c.repo, IDChan, errChan, &workerWG, done)
@ -238,7 +238,7 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
workerWG.Wait() workerWG.Wait()
debug.Log("Checker.Packs", "workers terminated") debug.Log("Checker.Packs", "workers terminated")
for id := range c.repo.List(backend.Data, done) { for id := range c.repo.List(restic.DataFile, done) {
debug.Log("Checker.Packs", "check data blob %v", id.Str()) debug.Log("Checker.Packs", "check data blob %v", id.Str())
if !seenPacks.Has(id) { if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id) c.orphanedPacks = append(c.orphanedPacks, id)
@ -253,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
// Error is an error that occurred while checking a repository. // Error is an error that occurred while checking a repository.
type Error struct { type Error struct {
TreeID backend.ID TreeID restic.ID
BlobID backend.ID BlobID restic.ID
Err error Err error
} }
@ -273,25 +273,25 @@ func (e Error) Error() string {
return e.Err.Error() return e.Err.Error()
} }
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) { func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(repo, id) sn, err := restic.LoadSnapshot(repo, id)
if err != nil { if err != nil {
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err) debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
return backend.ID{}, err return restic.ID{}, err
} }
if sn.Tree == nil { if sn.Tree == nil {
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str()) debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
return backend.ID{}, errors.Errorf("snapshot %v has no tree", id) return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
} }
return *sn.Tree, nil return *sn.Tree, nil
} }
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { func loadSnapshotTreeIDs(repo *repository.Repository) (restic.IDs, []error) {
var trees struct { var trees struct {
IDs backend.IDs IDs restic.IDs
sync.Mutex sync.Mutex
} }
@ -301,7 +301,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
} }
snapshotWorker := func(strID string, done <-chan struct{}) error { snapshotWorker := func(strID string, done <-chan struct{}) error {
id, err := backend.ParseID(strID) id, err := restic.ParseID(strID)
if err != nil { if err != nil {
return err return err
} }
@ -324,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
return nil return nil
} }
err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker) err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
if err != nil { if err != nil {
errs.errs = append(errs.errs, err) errs.errs = append(errs.errs, err)
} }
@ -334,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
// TreeError collects several errors that occurred while processing a tree. // TreeError collects several errors that occurred while processing a tree.
type TreeError struct { type TreeError struct {
ID backend.ID ID restic.ID
Errors []error Errors []error
} }
@ -343,14 +343,14 @@ func (e TreeError) Error() string {
} }
type treeJob struct { type treeJob struct {
backend.ID restic.ID
error error
*restic.Tree *restic.Tree
} }
// loadTreeWorker loads trees from repo and sends them to out. // loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(repo *repository.Repository, func loadTreeWorker(repo *repository.Repository,
in <-chan backend.ID, out chan<- treeJob, in <-chan restic.ID, out chan<- treeJob,
done <-chan struct{}, wg *sync.WaitGroup) { done <-chan struct{}, wg *sync.WaitGroup) {
defer func() { defer func() {
@ -454,7 +454,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
} }
} }
func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
defer func() { defer func() {
debug.Log("checker.filterTrees", "closing output channels") debug.Log("checker.filterTrees", "closing output channels")
close(loaderChan) close(loaderChan)
@ -466,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
outCh = out outCh = out
loadCh = loaderChan loadCh = loaderChan
job treeJob job treeJob
nextTreeID backend.ID nextTreeID restic.ID
outstandingLoadTreeJobs = 0 outstandingLoadTreeJobs = 0
) )
@ -559,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
} }
} }
treeIDChan := make(chan backend.ID) treeIDChan := make(chan restic.ID)
treeJobChan1 := make(chan treeJob) treeJobChan1 := make(chan treeJob)
treeJobChan2 := make(chan treeJob) treeJobChan2 := make(chan treeJob)
@ -575,10 +575,10 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
wg.Wait() wg.Wait()
} }
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
debug.Log("Checker.checkTree", "checking tree %v", id.Str()) debug.Log("Checker.checkTree", "checking tree %v", id.Str())
var blobs []backend.ID var blobs []restic.ID
for _, node := range tree.Nodes { for _, node := range tree.Nodes {
switch node.FileType { switch node.FileType {
@ -634,7 +634,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
} }
// UnusedBlobs returns all blobs that have never been referenced. // UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs backend.IDs) { func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
c.blobRefs.Lock() c.blobRefs.Lock()
defer c.blobRefs.Unlock() defer c.blobRefs.Unlock()
@ -650,7 +650,7 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
} }
// OrphanedPacks returns a slice of unused packs (only available after Packs() was run). // OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
func (c *Checker) OrphanedPacks() backend.IDs { func (c *Checker) OrphanedPacks() restic.IDs {
return c.orphanedPacks return c.orphanedPacks
} }
@ -660,15 +660,15 @@ func (c *Checker) CountPacks() uint64 {
} }
// checkPack reads a pack and checks the integrity of all blobs. // checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error { func checkPack(r *repository.Repository, id restic.ID) error {
debug.Log("Checker.checkPack", "checking pack %v", id.Str()) debug.Log("Checker.checkPack", "checking pack %v", id.Str())
h := backend.Handle{Type: backend.Data, Name: id.String()} h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(r.Backend(), h, nil) buf, err := backend.LoadAll(r.Backend(), h, nil)
if err != nil { if err != nil {
return err return err
} }
hash := backend.Hash(buf) hash := restic.Hash(buf)
if !hash.Equal(id) { if !hash.Equal(id) {
debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
@ -691,7 +691,7 @@ func checkPack(r *repository.Repository, id backend.ID) error {
continue continue
} }
hash := backend.Hash(plainBuf) hash := restic.Hash(plainBuf)
if !hash.Equal(blob.ID) { if !hash.Equal(blob.ID) {
debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()) debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
@ -713,10 +713,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
p.Start() p.Start()
defer p.Done() defer p.Done()
worker := func(wg *sync.WaitGroup, in <-chan backend.ID) { worker := func(wg *sync.WaitGroup, in <-chan restic.ID) {
defer wg.Done() defer wg.Done()
for { for {
var id backend.ID var id restic.ID
var ok bool var ok bool
select { select {
@ -742,7 +742,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
} }
} }
ch := c.repo.List(backend.Data, done) ch := c.repo.List(restic.DataFile, done)
var wg sync.WaitGroup var wg sync.WaitGroup
for i := 0; i < defaultParallelism; i++ { for i := 0; i < defaultParallelism; i++ {

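A hypothetical driver for the checker's channel-based API, combining LoadIndex and Packs the way the tests below do; the repo value and the error handling are assumptions.

// checkPacks loads the indexes, then streams pack errors while Packs runs.
// Packs closes errChan when it is done, so the range terminates.
func checkPacks(repo *repository.Repository) {
	chkr := checker.New(repo)
	if _, errs := chkr.LoadIndex(); len(errs) > 0 {
		fmt.Printf("index errors: %v\n", errs)
		return
	}

	done := make(chan struct{})
	defer close(done)

	errChan := make(chan error)
	go chkr.Packs(errChan, done)
	for err := range errChan {
		fmt.Println("pack error:", err)
	}
}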
View File

@ -17,7 +17,7 @@ import (
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func list(repo *repository.Repository, t backend.Type) (IDs []string) { func list(repo *repository.Repository, t restic.FileType) (IDs []string) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -83,7 +83,7 @@ func TestMissingPack(t *testing.T) {
repo := OpenLocalRepo(t, repodir) repo := OpenLocalRepo(t, repodir)
packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
OK(t, repo.Backend().Remove(backend.Data, packID)) OK(t, repo.Backend().Remove(restic.DataFile, packID))
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -115,7 +115,7 @@ func TestUnreferencedPack(t *testing.T) {
// index 3f1a only references pack 60e0 // index 3f1a only references pack 60e0
indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
OK(t, repo.Backend().Remove(backend.Index, indexID)) OK(t, repo.Backend().Remove(restic.IndexFile, indexID))
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -145,7 +145,7 @@ func TestUnreferencedBlobs(t *testing.T) {
repo := OpenLocalRepo(t, repodir) repo := OpenLocalRepo(t, repodir)
snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
OK(t, repo.Backend().Remove(backend.Snapshot, snID)) OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))
unusedBlobsBySnapshot := backend.IDs{ unusedBlobsBySnapshot := backend.IDs{
ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
@ -216,7 +216,7 @@ type errorBackend struct {
ProduceErrors bool ProduceErrors bool
} }
func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
fmt.Printf("load %v\n", h) fmt.Printf("load %v\n", h)
n, err := b.Backend.Load(h, p, off) n, err := b.Backend.Load(h, p, off)

View File

@ -1,11 +1,10 @@
package repository package restic
import ( import (
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"io" "io"
"restic"
"testing" "testing"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -31,12 +30,12 @@ const RepoVersion = 1
// JSONUnpackedSaver saves unpacked JSON. // JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface { type JSONUnpackedSaver interface {
SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error)
} }
// JSONUnpackedLoader loads unpacked JSON. // JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface { type JSONUnpackedLoader interface {
LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error LoadJSONUnpacked(FileType, ID, interface{}) error
} }
// CreateConfig creates a config file with a randomly selected polynomial and // CreateConfig creates a config file with a randomly selected polynomial and
@ -87,7 +86,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
cfg Config cfg Config
) )
err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg) err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg)
if err != nil { if err != nil {
return Config{}, err return Config{}, err
} }

View File

@ -1,10 +1,9 @@
package repository_test package restic_test
import ( import (
"restic" "restic"
"testing" "testing"
"restic/repository"
. "restic/test" . "restic/test"
) )
@ -21,18 +20,18 @@ func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{
} }
func TestConfig(t *testing.T) { func TestConfig(t *testing.T) {
resultConfig := repository.Config{} resultConfig := restic.Config{}
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) { save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile, Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v", "wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile) tpe, restic.ConfigFile)
cfg := arg.(repository.Config) cfg := arg.(restic.Config)
resultConfig = cfg resultConfig = cfg
return restic.ID{}, nil return restic.ID{}, nil
} }
cfg1, err := repository.CreateConfig() cfg1, err := restic.CreateConfig()
OK(t, err) OK(t, err)
_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1) _, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)
@ -42,12 +41,12 @@ func TestConfig(t *testing.T) {
"wrong backend type: got %v, wanted %v", "wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile) tpe, restic.ConfigFile)
cfg := arg.(*repository.Config) cfg := arg.(*restic.Config)
*cfg = resultConfig *cfg = resultConfig
return nil return nil
} }
cfg2, err := repository.LoadConfig(loader(load)) cfg2, err := restic.LoadConfig(loader(load))
OK(t, err) OK(t, err)
Assert(t, cfg1 == cfg2, Assert(t, cfg1 == cfg2,

View File

@ -15,18 +15,18 @@ import (
"restic/repository" "restic/repository"
) )
func loadIDSet(t testing.TB, filename string) BlobSet { func loadIDSet(t testing.TB, filename string) restic.BlobSet {
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
t.Logf("unable to open golden file %v: %v", filename, err) t.Logf("unable to open golden file %v: %v", filename, err)
return NewBlobSet() return restic.NewBlobSet()
} }
sc := bufio.NewScanner(f) sc := bufio.NewScanner(f)
blobs := NewBlobSet() blobs := restic.NewBlobSet()
for sc.Scan() { for sc.Scan() {
var h Handle var h restic.BlobHandle
err := json.Unmarshal([]byte(sc.Text()), &h) err := json.Unmarshal([]byte(sc.Text()), &h)
if err != nil { if err != nil {
t.Errorf("file %v contained invalid blob: %#v", filename, err) t.Errorf("file %v contained invalid blob: %#v", filename, err)
@ -43,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) BlobSet {
return blobs return blobs
} }
func saveIDSet(t testing.TB, filename string, s BlobSet) { func saveIDSet(t testing.TB, filename string, s restic.BlobSet) {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil { if err != nil {
t.Fatalf("unable to update golden file %v: %v", filename, err) t.Fatalf("unable to update golden file %v: %v", filename, err)
return return
} }
var hs Handles var hs restic.BlobHandles
for h := range s { for h := range s {
hs = append(hs, h) hs = append(hs, h)
} }
@ -83,16 +83,16 @@ func TestFindUsedBlobs(t *testing.T) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
defer cleanup() defer cleanup()
var snapshots []*Snapshot var snapshots []*restic.Snapshot
for i := 0; i < findTestSnapshots; i++ { for i := 0; i < findTestSnapshots; i++ {
sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshots = append(snapshots, sn) snapshots = append(snapshots, sn)
} }
for i, sn := range snapshots { for i, sn := range snapshots {
usedBlobs := NewBlobSet() usedBlobs := restic.NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, NewBlobSet()) err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet())
if err != nil { if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err) t.Errorf("FindUsedBlobs returned error: %v", err)
continue continue
@ -121,13 +121,13 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
repo, cleanup := repository.TestRepository(b) repo, cleanup := repository.TestRepository(b)
defer cleanup() defer cleanup()
sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
seen := NewBlobSet() seen := restic.NewBlobSet()
blobs := NewBlobSet() blobs := restic.NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen) err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil { if err != nil {
b.Error(err) b.Error(err)

View File

@ -65,7 +65,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
sn.Lock() sn.Lock()
defer sn.Unlock() defer sn.Unlock()
for id := range sn.repo.List(backend.Snapshot, ctx.Done()) { for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) {
snapshot, err := restic.LoadSnapshot(sn.repo, id) snapshot, err := restic.LoadSnapshot(sn.repo, id)
if err != nil { if err != nil {
return err return err

View File

@ -103,7 +103,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) {
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
var idx indexJSON var idx indexJSON
err := repo.LoadJSONUnpacked(backend.Index, id, &idx) err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -126,7 +126,7 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
index := newIndex() index := newIndex()
for id := range repo.List(backend.Index, done) { for id := range repo.List(restic.IndexFile, done) {
p.Report(restic.Stat{Blobs: 1}) p.Report(restic.Stat{Blobs: 1})
debug.Log("index.Load", "Load index %v", id.Str()) debug.Log("index.Load", "Load index %v", id.Str())
@ -335,5 +335,5 @@ func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes ba
idx.Packs = append(idx.Packs, p) idx.Packs = append(idx.Packs, p)
} }
return repo.SaveJSONUnpacked(backend.Index, idx) return repo.SaveJSONUnpacked(restic.IndexFile, idx)
} }

View File

@ -28,7 +28,7 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep
} }
func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) {
for id := range repo.List(backend.Data, nil) { for id := range repo.List(restic.DataFile, nil) {
if _, ok := idx.Packs[id]; !ok { if _, ok := idx.Packs[id]; !ok {
t.Errorf("pack %v missing from index", id.Str()) t.Errorf("pack %v missing from index", id.Str())
} }
@ -197,7 +197,7 @@ func TestIndexSave(t *testing.T) {
for id := range idx.IndexIDs { for id := range idx.IndexIDs {
t.Logf("remove index %v", id.Str()) t.Logf("remove index %v", id.Str())
err = repo.Backend().Remove(backend.Index, id.String()) err = repo.Backend().Remove(restic.IndexFile, id.String())
if err != nil { if err != nil {
t.Errorf("error removing index %v: %v", id, err) t.Errorf("error removing index %v: %v", id, err)
} }
@ -235,7 +235,7 @@ func TestIndexAddRemovePack(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
packID := <-repo.List(backend.Data, done) packID := <-repo.List(restic.DataFile, done)
t.Logf("selected pack %v", packID.Str()) t.Logf("selected pack %v", packID.Str())
@ -298,7 +298,7 @@ func TestIndexLoadDocReference(t *testing.T) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
defer cleanup() defer cleanup()
id, err := repo.SaveUnpacked(backend.Index, docExample) id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
if err != nil { if err != nil {
t.Fatalf("SaveUnpacked() returned error %v", err) t.Fatalf("SaveUnpacked() returned error %v", err)
} }

View File

@ -11,7 +11,7 @@ type Backend struct {
CloseFn func() error CloseFn func() error
LoadFn func(h restic.Handle, p []byte, off int64) (int, error) LoadFn func(h restic.Handle, p []byte, off int64) (int, error)
SaveFn func(h restic.Handle, p []byte) error SaveFn func(h restic.Handle, p []byte) error
StatFn func(h restic.Handle) (restic.BlobInfo, error) StatFn func(h restic.Handle) (restic.FileInfo, error)
ListFn func(restic.FileType, <-chan struct{}) <-chan string ListFn func(restic.FileType, <-chan struct{}) <-chan string
RemoveFn func(restic.FileType, string) error RemoveFn func(restic.FileType, string) error
TestFn func(restic.FileType, string) (bool, error) TestFn func(restic.FileType, string) (bool, error)
@ -56,9 +56,9 @@ func (m *Backend) Save(h restic.Handle, p []byte) error {
} }
// Stat an object in the backend. // Stat an object in the backend.
func (m *Backend) Stat(h restic.Handle) (restic.BlobInfo, error) { func (m *Backend) Stat(h restic.Handle) (restic.FileInfo, error) {
if m.StatFn == nil { if m.StatFn == nil {
return restic.BlobInfo{}, errors.New("not implemented") return restic.FileInfo{}, errors.New("not implemented")
} }
return m.StatFn(h) return m.StatFn(h)
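The function-field style makes selective stubbing cheap; in this sketch only Stat is wired up (the import path of the mock package is an assumption), and every other method falls back to the "not implemented" error shown above.

// A test double that reports a fixed size for every file.
be := &mock.Backend{
	StatFn: func(h restic.Handle) (restic.FileInfo, error) {
		return restic.FileInfo{Size: 42}, nil
	},
}

fi, _ := be.Stat(restic.Handle{Type: restic.ConfigFile})
fmt.Println(fi.Size) // 42; calling be.Save here returns an error instead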

View File

@ -42,7 +42,7 @@ type Node struct {
tree *Tree tree *Tree
path string Path string `json:"-"`
err error err error
} }
@ -67,7 +67,7 @@ func (node Node) Tree() *Tree {
func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
node := &Node{ node := &Node{
path: path, Path: path,
Name: fi.Name(), Name: fi.Name(),
Mode: fi.Mode() & mask, Mode: fi.Mode() & mask,
ModTime: fi.ModTime(), ModTime: fi.ModTime(),
@ -370,15 +370,15 @@ func (node Node) sameContent(other Node) bool {
return true return true
} }
func (node *Node) isNewer(path string, fi os.FileInfo) bool { func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if node.FileType != "file" { if node.FileType != "file" {
debug.Log("node.isNewer", "node %v is newer: not file", path) debug.Log("node.IsNewer", "node %v is newer: not file", path)
return true return true
} }
tpe := nodeTypeFromFileInfo(fi) tpe := nodeTypeFromFileInfo(fi)
if node.Name != fi.Name() || node.FileType != tpe { if node.Name != fi.Name() || node.FileType != tpe {
debug.Log("node.isNewer", "node %v is newer: name or type changed", path) debug.Log("node.IsNewer", "node %v is newer: name or type changed", path)
return true return true
} }
@ -388,7 +388,7 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool {
if !ok { if !ok {
if node.ModTime != fi.ModTime() || if node.ModTime != fi.ModTime() ||
node.Size != size { node.Size != size {
debug.Log("node.isNewer", "node %v is newer: timestamp or size changed", path) debug.Log("node.IsNewer", "node %v is newer: timestamp or size changed", path)
return true return true
} }
return false return false
@ -400,11 +400,11 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool {
node.ChangeTime != changeTime(extendedStat) || node.ChangeTime != changeTime(extendedStat) ||
node.Inode != uint64(inode) || node.Inode != uint64(inode) ||
node.Size != size { node.Size != size {
debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path) debug.Log("node.IsNewer", "node %v is newer: timestamp, size or inode changed", path)
return true return true
} }
debug.Log("node.isNewer", "node %v is not newer", path) debug.Log("node.IsNewer", "node %v is not newer", path)
return false return false
} }

View File

@ -9,7 +9,6 @@ import (
"time" "time"
"restic" "restic"
"restic/backend"
. "restic/test" . "restic/test"
) )
@ -75,7 +74,7 @@ var nodeTests = []restic.Node{
restic.Node{ restic.Node{
Name: "testFile", Name: "testFile",
FileType: "file", FileType: "file",
Content: []backend.ID{}, Content: restic.IDs{},
UID: uint32(os.Getuid()), UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()), GID: uint32(os.Getgid()),
Mode: 0604, Mode: 0604,
@ -86,7 +85,7 @@ var nodeTests = []restic.Node{
restic.Node{ restic.Node{
Name: "testSuidFile", Name: "testSuidFile",
FileType: "file", FileType: "file",
Content: []backend.ID{}, Content: restic.IDs{},
UID: uint32(os.Getuid()), UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()), GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSetuid, Mode: 0755 | os.ModeSetuid,
@ -97,7 +96,7 @@ var nodeTests = []restic.Node{
restic.Node{ restic.Node{
Name: "testSuidFile2", Name: "testSuidFile2",
FileType: "file", FileType: "file",
Content: []backend.ID{}, Content: restic.IDs{},
UID: uint32(os.Getuid()), UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()), GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSetgid, Mode: 0755 | os.ModeSetgid,
@ -108,7 +107,7 @@ var nodeTests = []restic.Node{
restic.Node{ restic.Node{
Name: "testSticky", Name: "testSticky",
FileType: "file", FileType: "file",
Content: []backend.ID{}, Content: restic.IDs{},
UID: uint32(os.Getuid()), UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()), GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSticky, Mode: 0755 | os.ModeSticky,

View File

@ -10,26 +10,12 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"restic/backend"
"restic/crypto" "restic/crypto"
) )
// Blob is a blob within a pack.
type Blob struct {
Type restic.BlobType
Length uint
ID restic.ID
Offset uint
}
func (b Blob) String() string {
return fmt.Sprintf("<Blob %v/%v len %v, off %v>",
b.ID.Str(), b.Type, b.Length, b.Offset)
}
// Packer is used to create a new Pack. // Packer is used to create a new Pack.
type Packer struct { type Packer struct {
blobs []Blob blobs []restic.Blob
bytes uint bytes uint
k *crypto.Key k *crypto.Key
@ -53,7 +39,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
p.m.Lock() p.m.Lock()
defer p.m.Unlock() defer p.m.Unlock()
c := Blob{Type: t, ID: id} c := restic.Blob{Type: t, ID: id}
n, err := p.wr.Write(data) n, err := p.wr.Write(data)
c.Length = uint(n) c.Length = uint(n)
@ -64,13 +50,13 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
return n, errors.Wrap(err, "Write") return n, errors.Wrap(err, "Write")
} }
var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize)
// headerEntry is used with encoding/binary to read and write header entries // headerEntry is used with encoding/binary to read and write header entries
type headerEntry struct { type headerEntry struct {
Type uint8 Type uint8
Length uint32 Length uint32
ID [backend.IDSize]byte ID [restic.IDSize]byte
} }
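As a sanity check on the on-disk header layout, the encoded size of one entry follows directly from the struct above; the arithmetic assumes BlobType is backed by uint8, which the Type uint8 field suggests.

// One header entry encodes as 1 (type) + 4 (length) + 32 (ID) = 37 bytes;
// binary.Size over headerEntry yields the same value as entrySize above.
var e headerEntry
if binary.Size(e) != 1+4+restic.IDSize {
	panic("unexpected header entry size")
}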
// Finalize writes the header for all added blobs and finalizes the pack. // Finalize writes the header for all added blobs and finalizes the pack.
@ -167,7 +153,7 @@ func (p *Packer) Count() int {
} }
// Blobs returns the slice of blobs that have been written. // Blobs returns the slice of blobs that have been written.
func (p *Packer) Blobs() []Blob { func (p *Packer) Blobs() []restic.Blob {
p.m.Lock() p.m.Lock()
defer p.m.Unlock() defer p.m.Unlock()
@ -233,7 +219,7 @@ func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
} }
// List returns the list of entries found in a pack file. // List returns the list of entries found in a pack file.
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) { func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) {
buf, err := readHeader(rd, size) buf, err := readHeader(rd, size)
if err != nil { if err != nil {
return nil, err return nil, err
@ -258,7 +244,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error)
return nil, errors.Wrap(err, "binary.Read") return nil, errors.Wrap(err, "binary.Read")
} }
entry := Blob{ entry := restic.Blob{
Length: uint(e.Length), Length: uint(e.Length),
ID: e.ID, ID: e.ID,
Offset: pos, Offset: pos,
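
The entrySize change above only swaps where the ID size comes from: a header entry is one byte of type, four bytes of length, and restic.IDSize bytes of ID. A minimal sketch checking that layout with encoding/binary, assuming IDSize is 32 (SHA-256) and little-endian encoding as in the pack writer:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    const idSize = 32 // assumed value of restic.IDSize (SHA-256)

    // headerEntry mirrors the struct in the diff above.
    type headerEntry struct {
        Type   uint8
        Length uint32
        ID     [idSize]byte
    }

    func main() {
        var buf bytes.Buffer
        e := headerEntry{Type: 1, Length: 4096}
        if err := binary.Write(&buf, binary.LittleEndian, e); err != nil {
            panic(err)
        }
        // 1 (Type) + 4 (Length) + 32 (ID) = 37 bytes per entry
        fmt.Println(buf.Len(), binary.Size(headerEntry{}))
    }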

View File

@ -7,6 +7,7 @@ import (
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"io" "io"
"restic"
"testing" "testing"
"restic/backend" "restic/backend"
@ -126,9 +127,9 @@ func TestUnpackReadSeeker(t *testing.T) {
b := mem.New() b := mem.New()
id := backend.Hash(packData) id := backend.Hash(packData)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
OK(t, b.Save(handle, packData)) OK(t, b.Save(handle, packData))
verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
} }
func TestShortPack(t *testing.T) { func TestShortPack(t *testing.T) {
@ -139,7 +140,7 @@ func TestShortPack(t *testing.T) {
b := mem.New() b := mem.New()
id := backend.Hash(packData) id := backend.Hash(packData)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
OK(t, b.Save(handle, packData)) OK(t, b.Save(handle, packData))
verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
} }

View File

@ -1,4 +1,4 @@
package backend package restic
import ( import (
"io" "io"

View File

@ -1,7 +1,5 @@
package restic package restic
import "github.com/restic/chunker"
// Repository stores data in a backend. It provides high-level functions and // Repository stores data in a backend. It provides high-level functions and
// transparently encrypts/decrypts data. // transparently encrypts/decrypts data.
type Repository interface { type Repository interface {
@ -9,12 +7,13 @@ type Repository interface {
// Backend returns the backend used by the repository // Backend returns the backend used by the repository
Backend() Backend Backend() Backend
SetIndex(interface{}) SetIndex(Index)
Index() Index Index() Index
SaveFullIndex() error SaveFullIndex() error
SaveJSON(BlobType, interface{}) (ID, error) SaveJSON(BlobType, interface{}) (ID, error)
SaveUnpacked(FileType, []byte) (ID, error)
Config() Config Config() Config
@ -34,13 +33,13 @@ type Repository interface {
Flush() error Flush() error
} }
// Lister allows listing files in a backend.
type Lister interface {
List(FileType, <-chan struct{}) <-chan string
}
// Index keeps track of which blobs are stored within files. // Index keeps track of which blobs are stored within files.
type Index interface { type Index interface {
Has(ID, BlobType) bool Has(ID, BlobType) bool
Lookup(ID, BlobType) ([]PackedBlob, error) Lookup(ID, BlobType) ([]PackedBlob, error)
} }
// Config stores information about the repository.
type Config interface {
ChunkerPolynomial() chunker.Pol
}
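
The trimmed Index interface needs only Has and Lookup, which makes it easy to fake in tests. A toy in-memory implementation, offered as a sketch (the map layout is an assumption, not restic's real index structure; restic.BlobHandle is comparable, so it can key a map directly):

    package example

    import (
        "errors"

        "restic"
    )

    // memIndex is a hypothetical in-memory restic.Index.
    type memIndex struct {
        blobs map[restic.BlobHandle]restic.PackedBlob
    }

    func (m *memIndex) Has(id restic.ID, t restic.BlobType) bool {
        _, ok := m.blobs[restic.BlobHandle{ID: id, Type: t}]
        return ok
    }

    func (m *memIndex) Lookup(id restic.ID, t restic.BlobType) ([]restic.PackedBlob, error) {
        if pb, ok := m.blobs[restic.BlobHandle{ID: id, Type: t}]; ok {
            return []restic.PackedBlob{pb}, nil
        }
        return nil, errors.New("blob not found in index")
    }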

View File

@ -3,7 +3,6 @@ package repository
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"restic" "restic"
"sync" "sync"
@ -40,7 +39,7 @@ func NewIndex() *Index {
} }
} }
func (idx *Index) store(blob PackedBlob) { func (idx *Index) store(blob restic.PackedBlob) {
newEntry := indexEntry{ newEntry := indexEntry{
packID: blob.PackID, packID: blob.PackID,
offset: blob.Offset, offset: blob.Offset,
@ -97,7 +96,7 @@ var IndexFull = func(idx *Index) bool {
// Store remembers the id and pack in the index. An existing entry will be // Store remembers the id and pack in the index. An existing entry will be
// silently overwritten. // silently overwritten.
func (idx *Index) Store(blob PackedBlob) { func (idx *Index) Store(blob restic.PackedBlob) {
idx.m.Lock() idx.m.Lock()
defer idx.m.Unlock() defer idx.m.Unlock()
@ -110,25 +109,27 @@ func (idx *Index) Store(blob PackedBlob) {
idx.store(blob) idx.store(blob)
} }
// Lookup queries the index for the blob ID and returns a PackedBlob. // Lookup queries the index for the blob ID and returns a restic.PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
idx.m.Lock() idx.m.Lock()
defer idx.m.Unlock() defer idx.m.Unlock()
h := restic.BlobHandle{ID: id, Type: tpe} h := restic.BlobHandle{ID: id, Type: tpe}
if packs, ok := idx.pack[h]; ok { if packs, ok := idx.pack[h]; ok {
blobs = make([]PackedBlob, 0, len(packs)) blobs = make([]restic.PackedBlob, 0, len(packs))
for _, p := range packs { for _, p := range packs {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length) id.Str(), p.packID.Str(), p.offset, p.length)
blob := PackedBlob{ blob := restic.PackedBlob{
Type: tpe, Blob: restic.Blob{
Length: p.length, Type: tpe,
ID: id, Length: p.length,
Offset: p.offset, ID: id,
Offset: p.offset,
},
PackID: p.packID, PackID: p.packID,
} }
@ -143,18 +144,20 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob,
} }
// ListPack returns a list of blobs contained in a pack. // ListPack returns a list of blobs contained in a pack.
func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) { func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) {
idx.m.Lock() idx.m.Lock()
defer idx.m.Unlock() defer idx.m.Unlock()
for h, packList := range idx.pack { for h, packList := range idx.pack {
for _, entry := range packList { for _, entry := range packList {
if entry.packID == id { if entry.packID == id {
list = append(list, PackedBlob{ list = append(list, restic.PackedBlob{
ID: h.ID, Blob: restic.Blob{
Type: h.Type, ID: h.ID,
Length: entry.length, Type: h.Type,
Offset: entry.offset, Length: entry.length,
Offset: entry.offset,
},
PackID: entry.packID, PackID: entry.packID,
}) })
} }
@ -182,7 +185,7 @@ func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength
return 0, err return 0, err
} }
return blobs[0].PlaintextLength(), nil return blobs[0].Length - crypto.Extension, nil
} }
// Supersedes returns the list of indexes this index supersedes, if any. // Supersedes returns the list of indexes this index supersedes, if any.
@ -204,32 +207,13 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
return nil return nil
} }
// PackedBlob is a blob already saved within a pack.
type PackedBlob struct {
Type restic.BlobType
Length uint
ID restic.ID
Offset uint
PackID restic.ID
}
func (pb PackedBlob) String() string {
return fmt.Sprintf("<PackedBlob %v type %v in pack %v: len %v, offset %v",
pb.ID.Str(), pb.Type, pb.PackID.Str(), pb.Length, pb.Offset)
}
// PlaintextLength returns the number of bytes the blob's plaintext occupies.
func (pb PackedBlob) PlaintextLength() uint {
return pb.Length - crypto.Extension
}
// Each returns a channel that yields all blobs known to the index. If done is // Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of // closed, the background goroutine terminates. This blocks any modification of
// the index. // the index.
func (idx *Index) Each(done chan struct{}) <-chan PackedBlob { func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
idx.m.Lock() idx.m.Lock()
ch := make(chan PackedBlob) ch := make(chan restic.PackedBlob)
go func() { go func() {
defer idx.m.Unlock() defer idx.m.Unlock()
@ -242,11 +226,13 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
select { select {
case <-done: case <-done:
return return
case ch <- PackedBlob{ case ch <- restic.PackedBlob{
ID: h.ID, Blob: restic.Blob{
Type: h.Type, ID: h.ID,
Offset: blob.offset, Type: h.Type,
Length: blob.length, Offset: blob.offset,
Length: blob.length,
},
PackID: blob.packID, PackID: blob.packID,
}: }:
} }
@ -497,11 +483,13 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex() idx = NewIndex()
for _, pack := range idxJSON.Packs { for _, pack := range idxJSON.Packs {
for _, blob := range pack.Blobs { for _, blob := range pack.Blobs {
idx.store(PackedBlob{ idx.store(restic.PackedBlob{
Type: blob.Type, Blob: restic.Blob{
ID: blob.ID, Type: blob.Type,
Offset: blob.Offset, ID: blob.ID,
Length: blob.Length, Offset: blob.Offset,
Length: blob.Length,
},
PackID: pack.ID, PackID: pack.ID,
}) })
} }
@ -528,12 +516,14 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex() idx = NewIndex()
for _, pack := range list { for _, pack := range list {
for _, blob := range pack.Blobs { for _, blob := range pack.Blobs {
idx.store(PackedBlob{ idx.store(restic.PackedBlob{
Type: blob.Type, Blob: restic.Blob{
ID: blob.ID, Type: blob.Type,
ID: blob.ID,
Offset: blob.Offset,
Length: blob.Length,
},
PackID: pack.ID, PackID: pack.ID,
Offset: blob.Offset,
Length: blob.Length,
}) })
} }
} }
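
Every hunk in this file follows the same mechanical pattern: PackedBlob now embeds restic.Blob, so the four blob fields move into a nested literal, and the deleted PlaintextLength helper is replaced by subtracting crypto.Extension (the per-blob encryption overhead) inline. A sketch of the new shape, with the IDs assumed to be in scope:

    pb := restic.PackedBlob{
        Blob: restic.Blob{
            Type:   restic.DataBlob,
            ID:     blobID,
            Offset: 0,
            Length: 4096, // ciphertext length, overhead included
        },
        PackID: packID,
    }

    // The embedded Blob promotes its fields, so pb.Length still reads
    // naturally; the plaintext size is now computed inline:
    plaintext := pb.Length - crypto.Extension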

View File

@ -33,11 +33,8 @@ func RebuildIndex(repo restic.Repository) error {
res := job.Result.(list.Result) res := job.Result.(list.Result)
for _, entry := range res.Entries() { for _, entry := range res.Entries() {
pb := PackedBlob{ pb := restic.PackedBlob{
ID: entry.ID, Blob: entry,
Type: entry.Type,
Length: entry.Length,
Offset: entry.Offset,
PackID: res.PackID(), PackID: res.PackID(),
} }
idx.Store(pb) idx.Store(pb)

View File

@ -116,7 +116,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
// try at most maxKeysForSearch keys in repo // try at most maxKeysForSearch keys in repo
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for name := range s.Backend().List(backend.Key, done) { for name := range s.Backend().List(restic.KeyFile, done) {
if maxKeys > 0 && checked > maxKeys { if maxKeys > 0 && checked > maxKeys {
return nil, ErrMaxKeysReached return nil, ErrMaxKeysReached
} }
@ -226,8 +226,8 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
// store in repository and return // store in repository and return
h := restic.Handle{ h := restic.Handle{
Type: backend.Key, FileType: restic.KeyFile,
Name: restic.Hash(buf).String(), Name: restic.Hash(buf).String(),
} }
err = s.be.Save(h, buf) err = s.be.Save(h, buf)
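
The handle literal above shows the commit-wide rename in one place: backend.Key becomes restic.KeyFile, and the handle's Type field appears as FileType in this file. The same pattern for a data file, as a sketch with be and data assumed in scope:

    id := restic.Hash(data)
    h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
    if err := be.Save(h, data); err != nil {
        // handle the backend error
    }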

View File

@ -7,7 +7,6 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"restic/debug" "restic/debug"
"restic/pack"
) )
// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved. // MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved.
@ -22,7 +21,7 @@ func NewMasterIndex() *MasterIndex {
} }
// Lookup queries all known Indexes for the ID and returns the first match. // Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
mi.idxMutex.RLock() mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock() defer mi.idxMutex.RUnlock()
@ -58,7 +57,7 @@ func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, erro
// ListPack returns the list of blobs in a pack. The first matching index is // ListPack returns the list of blobs in a pack. The first matching index is
// returned, or nil if no index contains information about the pack id. // returned, or nil if no index contains information about the pack id.
func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) { func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) {
mi.idxMutex.RLock() mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock() defer mi.idxMutex.RUnlock()
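
MasterIndex.Lookup fans the query out over all in-memory indexes and returns the first match, so callers see one flat result list. A usage sketch under the new types, with mi and id assumed in scope:

    blobs, err := mi.Lookup(id, restic.DataBlob)
    if err != nil {
        return err // no index references this blob
    }
    for _, pb := range blobs {
        fmt.Printf("blob %v: pack %v, offset %d, length %d\n",
            pb.ID.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
    }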

View File

@ -115,7 +115,7 @@ func (r *Repository) savePacker(p *pack.Packer) error {
} }
id := restic.Hash(data) id := restic.Hash(data)
h := restic.Handle{Type: restic.DataFile, Name: id.String()} h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
err = r.be.Save(h, data) err = r.be.Save(h, data)
if err != nil { if err != nil {
@ -133,12 +133,14 @@ func (r *Repository) savePacker(p *pack.Packer) error {
// update blobs in the index // update blobs in the index
for _, b := range p.Blobs() { for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str()) debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Current().Store(PackedBlob{ r.idx.Current().Store(restic.PackedBlob{
Type: b.Type, Blob: restic.Blob{
ID: b.ID, Type: b.Type,
ID: b.ID,
Offset: b.Offset,
Length: uint(b.Length),
},
PackID: id, PackID: id,
Offset: b.Offset,
Length: uint(b.Length),
}) })
} }

View File

@ -4,7 +4,6 @@ import (
"restic" "restic"
"sync" "sync"
"restic/backend"
"restic/debug" "restic/debug"
) )
@ -22,14 +21,14 @@ func closeIfOpen(ch chan struct{}) {
// processing stops. If done is closed, the function should return. // processing stops. If done is closed, the function should return.
type ParallelWorkFunc func(id string, done <-chan struct{}) error type ParallelWorkFunc func(id string, done <-chan struct{}) error
// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned, // ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return. // processing stops. If done is closed, the function should return.
type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error
// FilesInParallel runs n workers of f in parallel, on the IDs that // FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yield. If f returns an error, the process is aborted and the // repo.List(t) yield. If f returns an error, the process is aborted and the
// first error is returned. // first error is returned.
func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error { func FilesInParallel(repo restic.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
done := make(chan struct{}) done := make(chan struct{})
defer closeIfOpen(done) defer closeIfOpen(done)
@ -76,12 +75,12 @@ func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelW
return nil return nil
} }
// ParallelWorkFuncParseID converts a function that takes a backend.ID to a // ParallelWorkFuncParseID converts a function that takes a restic.ID to a
// function that takes a string. Filenames that do not parse as a backend.ID // function that takes a string. Filenames that do not parse as a restic.ID
// are ignored. // are ignored.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc { func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
return func(s string, done <-chan struct{}) error { return func(s string, done <-chan struct{}) error {
id, err := backend.ParseID(s) id, err := restic.ParseID(s)
if err != nil { if err != nil {
debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err) debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err)
return err return err
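
Together the two helpers let a caller process every file of one type with a bounded number of workers, while ID parsing is handled by the wrapper. A sketch, where restic.SnapshotFile and the surrounding variables are assumptions:

    worker := func(id restic.ID, done <-chan struct{}) error {
        // per-ID work goes here; returning an error aborts the remaining workers
        fmt.Println("processing", id.Str())
        return nil
    }

    err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, 4,
        repository.ParallelWorkFuncParseID(worker))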

View File

@ -15,13 +15,13 @@ import (
// these packs. Each pack is loaded and the blobs listed in keepBlobs are saved // these packs. Each pack is loaded and the blobs listed in keepBlobs are saved
// into a new pack. Afterwards, the packs are removed. This operation requires // into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo. // an exclusive lock on the repo.
func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) { func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
buf := make([]byte, 0, maxPackSize) buf := make([]byte, 0, maxPackSize)
for packID := range packs { for packID := range packs {
// load the complete pack // load the complete pack
h := restic.Handle{Type: restic.DataFile, Name: packID.String()} h := restic.Handle{FileType: restic.DataFile, Name: packID.String()}
l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
if errors.Cause(err) == io.ErrUnexpectedEOF { if errors.Cause(err) == io.ErrUnexpectedEOF {
@ -43,7 +43,7 @@ func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err e
debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs)) debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs))
var plaintext []byte var plaintext []byte
for _, entry := range blobs { for _, entry := range blobs {
h := pack.Handle{ID: entry.ID, Type: entry.Type} h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if !keepBlobs.Has(h) { if !keepBlobs.Has(h) {
continue continue
} }
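
Callers of Repack now express the keep set as a restic.BlobSet keyed by restic.BlobHandle instead of the old pack.BlobSet/pack.Handle pair. A sketch of building one; NewBlobSet and Insert are assumed names:

    keep := restic.NewBlobSet()
    for _, pb := range blobsToKeep {
        keep.Insert(restic.BlobHandle{ID: pb.ID, Type: pb.Type})
    }

    if err := repository.Repack(repo, packIDs, keep); err != nil {
        return err
    }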

View File

@ -19,7 +19,7 @@ import (
// Repository is used to access a repository in a backend. // Repository is used to access a repository in a backend.
type Repository struct { type Repository struct {
be restic.Backend be restic.Backend
Config Config cfg restic.Config
key *crypto.Key key *crypto.Key
keyName string keyName string
idx *MasterIndex idx *MasterIndex
@ -38,17 +38,21 @@ func New(be restic.Backend) *Repository {
return repo return repo
} }
func (r *Repository) Config() restic.Config {
return r.cfg
}
// Find loads the list of all files of type t and searches for names which start // Find loads the list of all files of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned. // more than one is found, nil and ErrMultipleIDMatches is returned.
func (r *Repository) Find(t restic.FileType, prefix string) (string, error) { func (r *Repository) Find(t restic.FileType, prefix string) (string, error) {
return backend.Find(r.be, t, prefix) return restic.Find(r.be, t, prefix)
} }
// PrefixLength returns the number of bytes required so that all prefixes of // PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique. // all IDs of type t are unique.
func (r *Repository) PrefixLength(t restic.FileType) (int, error) { func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
return backend.PrefixLength(r.be, t) return restic.PrefixLength(r.be, t)
} }
// LoadAndDecrypt loads and decrypts data identified by t and id from the // LoadAndDecrypt loads and decrypts data identified by t and id from the
@ -56,7 +60,7 @@ func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
h := restic.Handle{Type: t, Name: id.String()} h := restic.Handle{FileType: t, Name: id.String()}
buf, err := backend.LoadAll(r.be, h, nil) buf, err := backend.LoadAll(r.be, h, nil)
if err != nil { if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
@ -112,7 +116,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
} }
// load blob from pack // load blob from pack
h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} h := restic.Handle{FileType: restic.DataFile, Name: blob.PackID.String()}
ciphertextBuf := make([]byte, blob.Length) ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil { if err != nil {
@ -274,7 +278,7 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er
} }
id = restic.Hash(ciphertext) id = restic.Hash(ciphertext)
h := restic.Handle{Type: t, Name: id.String()} h := restic.Handle{FileType: t, Name: id.String()}
err = r.be.Save(h, ciphertext) err = r.be.Save(h, ciphertext)
if err != nil { if err != nil {
@ -309,13 +313,13 @@ func (r *Repository) Backend() restic.Backend {
} }
// Index returns the currently used MasterIndex. // Index returns the currently used MasterIndex.
func (r *Repository) Index() *MasterIndex { func (r *Repository) Index() restic.Index {
return r.idx return r.idx
} }
// SetIndex instructs the repository to use the given index. // SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i *MasterIndex) { func (r *Repository) SetIndex(i restic.Index) {
r.idx = i r.idx = i.(*MasterIndex)
} }
// SaveIndex saves an index in the repository. // SaveIndex saves an index in the repository.
@ -423,7 +427,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error {
r.key = key.master r.key = key.master
r.packerManager.key = key.master r.packerManager.key = key.master
r.keyName = key.Name() r.keyName = key.Name()
r.Config, err = LoadConfig(r) r.cfg, err = restic.LoadConfig(r)
return err return err
} }
@ -438,7 +442,7 @@ func (r *Repository) Init(password string) error {
return errors.New("repository master key and config already initialized") return errors.New("repository master key and config already initialized")
} }
cfg, err := CreateConfig() cfg, err := restic.CreateConfig()
if err != nil { if err != nil {
return err return err
} }
@ -448,7 +452,7 @@ func (r *Repository) Init(password string) error {
// init creates a new master key with the supplied password and uses it to save // init creates a new master key with the supplied password and uses it to save
// the config into the repo. // the config into the repo.
func (r *Repository) init(password string, cfg Config) error { func (r *Repository) init(password string, cfg restic.Config) error {
key, err := createMasterKey(r, password) key, err := createMasterKey(r, password)
if err != nil { if err != nil {
return err return err
@ -457,7 +461,7 @@ func (r *Repository) init(password string, cfg Config) error {
r.key = key.master r.key = key.master
r.packerManager.key = key.master r.packerManager.key = key.master
r.keyName = key.Name() r.keyName = key.Name()
r.Config = cfg r.cfg = cfg
_, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg) _, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg)
return err return err
} }
@ -528,7 +532,7 @@ func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- re
// input channel closed, we're done // input channel closed, we're done
return return
} }
id, err = backend.ParseID(strID) id, err = restic.ParseID(strID)
if err != nil { if err != nil {
// ignore invalid IDs // ignore invalid IDs
continue continue
@ -554,15 +558,15 @@ func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic
// ListPack returns the list of blobs saved in the pack id and the length of // ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend. // the file as stored in the backend.
func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) { func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) {
h := restic.Handle{Type: restic.DataFile, Name: id.String()} h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
blobInfo, err := r.Backend().Stat(h) blobInfo, err := r.Backend().Stat(h)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
blobs, err := pack.List(r.Key(), backend.ReaderAt(r.Backend(), h), blobInfo.Size) blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
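
Two API consequences of this file are easy to miss: the repository's config is now reached through the Config() accessor instead of an exported field, with ChunkerPolynomial a plain field on restic.Config rather than a method; and SetIndex accepts the restic.Index interface but asserts it back to *MasterIndex, so passing any other implementation would panic. The chunker call site, as a sketch:

    // ChunkerPolynomial used to be a method and is now a struct field:
    chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)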

View File

@ -47,7 +47,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository,
r = New(be) r = New(be)
cfg := TestCreateConfig(t, testChunkerPol) cfg := restic.TestCreateConfig(t, testChunkerPol)
err := r.init(TestPassword, cfg) err := r.init(TestPassword, cfg)
if err != nil { if err != nil {
t.Fatalf("TestRepository(): initialize repo failed: %v", err) t.Fatalf("TestRepository(): initialize repo failed: %v", err)

View File

@ -1,20 +1,18 @@
package restic package restic_test
import ( import (
"encoding/json" "encoding/json"
"flag"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path/filepath" "path/filepath"
"reflect" "reflect"
"restic"
"sort" "sort"
"testing" "testing"
"time" "time"
) )
var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") func parseTimeUTC(s string) time.Time {
func parseTime(s string) time.Time {
t, err := time.Parse("2006-01-02 15:04:05", s) t, err := time.Parse("2006-01-02 15:04:05", s)
if err != nil { if err != nil {
panic(err) panic(err)
@ -23,29 +21,29 @@ func parseTime(s string) time.Time {
return t.UTC() return t.UTC()
} }
var testFilterSnapshots = Snapshots{ var testFilterSnapshots = restic.Snapshots{
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, {Hostname: "bar", Username: "root", Time: parseTimeUTC("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}},
} }
var filterTests = []SnapshotFilter{ var filterTests = []restic.SnapshotFilter{
{Hostname: "foo"}, {Hostname: "foo"},
{Username: "root"}, {Username: "root"},
{Hostname: "foo", Username: "root"}, {Hostname: "foo", Username: "root"},
@ -58,7 +56,7 @@ func TestFilterSnapshots(t *testing.T) {
sort.Sort(testFilterSnapshots) sort.Sort(testFilterSnapshots)
for i, f := range filterTests { for i, f := range filterTests {
res := FilterSnapshots(testFilterSnapshots, f) res := restic.FilterSnapshots(testFilterSnapshots, f)
goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i)) goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i))
@ -79,7 +77,7 @@ func TestFilterSnapshots(t *testing.T) {
continue continue
} }
var want Snapshots var want restic.Snapshots
err = json.Unmarshal(buf, &want) err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(res, want) { if !reflect.DeepEqual(res, want) {
@ -89,109 +87,109 @@ func TestFilterSnapshots(t *testing.T) {
} }
} }
var testExpireSnapshots = Snapshots{ var testExpireSnapshots = restic.Snapshots{
{Time: parseTime("2014-09-01 10:20:30")}, {Time: parseTimeUTC("2014-09-01 10:20:30")},
{Time: parseTime("2014-09-02 10:20:30")}, {Time: parseTimeUTC("2014-09-02 10:20:30")},
{Time: parseTime("2014-09-05 10:20:30")}, {Time: parseTimeUTC("2014-09-05 10:20:30")},
{Time: parseTime("2014-09-06 10:20:30")}, {Time: parseTimeUTC("2014-09-06 10:20:30")},
{Time: parseTime("2014-09-08 10:20:30")}, {Time: parseTimeUTC("2014-09-08 10:20:30")},
{Time: parseTime("2014-09-09 10:20:30")}, {Time: parseTimeUTC("2014-09-09 10:20:30")},
{Time: parseTime("2014-09-10 10:20:30")}, {Time: parseTimeUTC("2014-09-10 10:20:30")},
{Time: parseTime("2014-09-11 10:20:30")}, {Time: parseTimeUTC("2014-09-11 10:20:30")},
{Time: parseTime("2014-09-20 10:20:30")}, {Time: parseTimeUTC("2014-09-20 10:20:30")},
{Time: parseTime("2014-09-22 10:20:30")}, {Time: parseTimeUTC("2014-09-22 10:20:30")},
{Time: parseTime("2014-08-08 10:20:30")}, {Time: parseTimeUTC("2014-08-08 10:20:30")},
{Time: parseTime("2014-08-10 10:20:30")}, {Time: parseTimeUTC("2014-08-10 10:20:30")},
{Time: parseTime("2014-08-12 10:20:30")}, {Time: parseTimeUTC("2014-08-12 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")}, {Time: parseTimeUTC("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")}, {Time: parseTimeUTC("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-15 10:20:30")}, {Time: parseTimeUTC("2014-08-15 10:20:30")},
{Time: parseTime("2014-08-18 10:20:30")}, {Time: parseTimeUTC("2014-08-18 10:20:30")},
{Time: parseTime("2014-08-20 10:20:30")}, {Time: parseTimeUTC("2014-08-20 10:20:30")},
{Time: parseTime("2014-08-21 10:20:30")}, {Time: parseTimeUTC("2014-08-21 10:20:30")},
{Time: parseTime("2014-08-22 10:20:30")}, {Time: parseTimeUTC("2014-08-22 10:20:30")},
{Time: parseTime("2014-10-01 10:20:30")}, {Time: parseTimeUTC("2014-10-01 10:20:30")},
{Time: parseTime("2014-10-02 10:20:30")}, {Time: parseTimeUTC("2014-10-02 10:20:30")},
{Time: parseTime("2014-10-05 10:20:30")}, {Time: parseTimeUTC("2014-10-05 10:20:30")},
{Time: parseTime("2014-10-06 10:20:30")}, {Time: parseTimeUTC("2014-10-06 10:20:30")},
{Time: parseTime("2014-10-08 10:20:30")}, {Time: parseTimeUTC("2014-10-08 10:20:30")},
{Time: parseTime("2014-10-09 10:20:30")}, {Time: parseTimeUTC("2014-10-09 10:20:30")},
{Time: parseTime("2014-10-10 10:20:30")}, {Time: parseTimeUTC("2014-10-10 10:20:30")},
{Time: parseTime("2014-10-11 10:20:30")}, {Time: parseTimeUTC("2014-10-11 10:20:30")},
{Time: parseTime("2014-10-20 10:20:30")}, {Time: parseTimeUTC("2014-10-20 10:20:30")},
{Time: parseTime("2014-10-22 10:20:30")}, {Time: parseTimeUTC("2014-10-22 10:20:30")},
{Time: parseTime("2014-11-08 10:20:30")}, {Time: parseTimeUTC("2014-11-08 10:20:30")},
{Time: parseTime("2014-11-10 10:20:30")}, {Time: parseTimeUTC("2014-11-10 10:20:30")},
{Time: parseTime("2014-11-12 10:20:30")}, {Time: parseTimeUTC("2014-11-12 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")}, {Time: parseTimeUTC("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")}, {Time: parseTimeUTC("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-15 10:20:30")}, {Time: parseTimeUTC("2014-11-15 10:20:30")},
{Time: parseTime("2014-11-18 10:20:30")}, {Time: parseTimeUTC("2014-11-18 10:20:30")},
{Time: parseTime("2014-11-20 10:20:30")}, {Time: parseTimeUTC("2014-11-20 10:20:30")},
{Time: parseTime("2014-11-21 10:20:30")}, {Time: parseTimeUTC("2014-11-21 10:20:30")},
{Time: parseTime("2014-11-22 10:20:30")}, {Time: parseTimeUTC("2014-11-22 10:20:30")},
{Time: parseTime("2015-09-01 10:20:30")}, {Time: parseTimeUTC("2015-09-01 10:20:30")},
{Time: parseTime("2015-09-02 10:20:30")}, {Time: parseTimeUTC("2015-09-02 10:20:30")},
{Time: parseTime("2015-09-05 10:20:30")}, {Time: parseTimeUTC("2015-09-05 10:20:30")},
{Time: parseTime("2015-09-06 10:20:30")}, {Time: parseTimeUTC("2015-09-06 10:20:30")},
{Time: parseTime("2015-09-08 10:20:30")}, {Time: parseTimeUTC("2015-09-08 10:20:30")},
{Time: parseTime("2015-09-09 10:20:30")}, {Time: parseTimeUTC("2015-09-09 10:20:30")},
{Time: parseTime("2015-09-10 10:20:30")}, {Time: parseTimeUTC("2015-09-10 10:20:30")},
{Time: parseTime("2015-09-11 10:20:30")}, {Time: parseTimeUTC("2015-09-11 10:20:30")},
{Time: parseTime("2015-09-20 10:20:30")}, {Time: parseTimeUTC("2015-09-20 10:20:30")},
{Time: parseTime("2015-09-22 10:20:30")}, {Time: parseTimeUTC("2015-09-22 10:20:30")},
{Time: parseTime("2015-08-08 10:20:30")}, {Time: parseTimeUTC("2015-08-08 10:20:30")},
{Time: parseTime("2015-08-10 10:20:30")}, {Time: parseTimeUTC("2015-08-10 10:20:30")},
{Time: parseTime("2015-08-12 10:20:30")}, {Time: parseTimeUTC("2015-08-12 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")}, {Time: parseTimeUTC("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")}, {Time: parseTimeUTC("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-15 10:20:30")}, {Time: parseTimeUTC("2015-08-15 10:20:30")},
{Time: parseTime("2015-08-18 10:20:30")}, {Time: parseTimeUTC("2015-08-18 10:20:30")},
{Time: parseTime("2015-08-20 10:20:30")}, {Time: parseTimeUTC("2015-08-20 10:20:30")},
{Time: parseTime("2015-08-21 10:20:30")}, {Time: parseTimeUTC("2015-08-21 10:20:30")},
{Time: parseTime("2015-08-22 10:20:30")}, {Time: parseTimeUTC("2015-08-22 10:20:30")},
{Time: parseTime("2015-10-01 10:20:30")}, {Time: parseTimeUTC("2015-10-01 10:20:30")},
{Time: parseTime("2015-10-02 10:20:30")}, {Time: parseTimeUTC("2015-10-02 10:20:30")},
{Time: parseTime("2015-10-05 10:20:30")}, {Time: parseTimeUTC("2015-10-05 10:20:30")},
{Time: parseTime("2015-10-06 10:20:30")}, {Time: parseTimeUTC("2015-10-06 10:20:30")},
{Time: parseTime("2015-10-08 10:20:30")}, {Time: parseTimeUTC("2015-10-08 10:20:30")},
{Time: parseTime("2015-10-09 10:20:30")}, {Time: parseTimeUTC("2015-10-09 10:20:30")},
{Time: parseTime("2015-10-10 10:20:30")}, {Time: parseTimeUTC("2015-10-10 10:20:30")},
{Time: parseTime("2015-10-11 10:20:30")}, {Time: parseTimeUTC("2015-10-11 10:20:30")},
{Time: parseTime("2015-10-20 10:20:30")}, {Time: parseTimeUTC("2015-10-20 10:20:30")},
{Time: parseTime("2015-10-22 10:20:30")}, {Time: parseTimeUTC("2015-10-22 10:20:30")},
{Time: parseTime("2015-11-08 10:20:30")}, {Time: parseTimeUTC("2015-11-08 10:20:30")},
{Time: parseTime("2015-11-10 10:20:30")}, {Time: parseTimeUTC("2015-11-10 10:20:30")},
{Time: parseTime("2015-11-12 10:20:30")}, {Time: parseTimeUTC("2015-11-12 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")}, {Time: parseTimeUTC("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")}, {Time: parseTimeUTC("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-15 10:20:30")}, {Time: parseTimeUTC("2015-11-15 10:20:30")},
{Time: parseTime("2015-11-18 10:20:30")}, {Time: parseTimeUTC("2015-11-18 10:20:30")},
{Time: parseTime("2015-11-20 10:20:30")}, {Time: parseTimeUTC("2015-11-20 10:20:30")},
{Time: parseTime("2015-11-21 10:20:30")}, {Time: parseTimeUTC("2015-11-21 10:20:30")},
{Time: parseTime("2015-11-22 10:20:30")}, {Time: parseTimeUTC("2015-11-22 10:20:30")},
{Time: parseTime("2016-01-01 01:02:03")}, {Time: parseTimeUTC("2016-01-01 01:02:03")},
{Time: parseTime("2016-01-01 01:03:03")}, {Time: parseTimeUTC("2016-01-01 01:03:03")},
{Time: parseTime("2016-01-01 07:08:03")}, {Time: parseTimeUTC("2016-01-01 07:08:03")},
{Time: parseTime("2016-01-03 07:02:03")}, {Time: parseTimeUTC("2016-01-03 07:02:03")},
{Time: parseTime("2016-01-04 10:23:03")}, {Time: parseTimeUTC("2016-01-04 10:23:03")},
{Time: parseTime("2016-01-04 11:23:03")}, {Time: parseTimeUTC("2016-01-04 11:23:03")},
{Time: parseTime("2016-01-04 12:23:03")}, {Time: parseTimeUTC("2016-01-04 12:23:03")},
{Time: parseTime("2016-01-04 12:24:03")}, {Time: parseTimeUTC("2016-01-04 12:24:03")},
{Time: parseTime("2016-01-04 12:28:03")}, {Time: parseTimeUTC("2016-01-04 12:28:03")},
{Time: parseTime("2016-01-04 12:30:03")}, {Time: parseTimeUTC("2016-01-04 12:30:03")},
{Time: parseTime("2016-01-04 16:23:03")}, {Time: parseTimeUTC("2016-01-04 16:23:03")},
{Time: parseTime("2016-01-05 09:02:03")}, {Time: parseTimeUTC("2016-01-05 09:02:03")},
{Time: parseTime("2016-01-06 08:02:03")}, {Time: parseTimeUTC("2016-01-06 08:02:03")},
{Time: parseTime("2016-01-07 10:02:03")}, {Time: parseTimeUTC("2016-01-07 10:02:03")},
{Time: parseTime("2016-01-08 20:02:03")}, {Time: parseTimeUTC("2016-01-08 20:02:03")},
{Time: parseTime("2016-01-09 21:02:03")}, {Time: parseTimeUTC("2016-01-09 21:02:03")},
{Time: parseTime("2016-01-12 21:02:03")}, {Time: parseTimeUTC("2016-01-12 21:02:03")},
{Time: parseTime("2016-01-12 21:08:03")}, {Time: parseTimeUTC("2016-01-12 21:08:03")},
{Time: parseTime("2016-01-18 12:02:03")}, {Time: parseTimeUTC("2016-01-18 12:02:03")},
} }
var expireTests = []ExpirePolicy{ var expireTests = []restic.ExpirePolicy{
{}, {},
{Last: 10}, {Last: 10},
{Last: 15}, {Last: 15},
@ -214,7 +212,7 @@ var expireTests = []ExpirePolicy{
func TestApplyPolicy(t *testing.T) { func TestApplyPolicy(t *testing.T) {
for i, p := range expireTests { for i, p := range expireTests {
keep, remove := ApplyPolicy(testExpireSnapshots, p) keep, remove := restic.ApplyPolicy(testExpireSnapshots, p)
t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v", t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v",
i, len(keep), len(remove), len(testExpireSnapshots), p) i, len(keep), len(remove), len(testExpireSnapshots), p)
@ -255,7 +253,7 @@ func TestApplyPolicy(t *testing.T) {
continue continue
} }
var want Snapshots var want restic.Snapshots
err = json.Unmarshal(buf, &want) err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(keep, want) { if !reflect.DeepEqual(keep, want) {
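
The table-driven test above exercises restic.ApplyPolicy, which splits the snapshot list into the snapshots to keep and those to remove. A minimal usage sketch, with the snapshot list assumed:

    policy := restic.ExpirePolicy{Last: 10} // keep only the 10 most recent
    keep, remove := restic.ApplyPolicy(snapshots, policy)
    fmt.Printf("keeping %d, removing %d of %d snapshots\n",
        len(keep), len(remove), len(snapshots))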

View File

@ -8,7 +8,7 @@ import (
"testing" "testing"
"restic" "restic"
"restic/backend" "restic/archiver"
"restic/backend/local" "restic/backend/local"
"restic/repository" "restic/repository"
) )
@ -83,8 +83,8 @@ func TeardownRepo(repo *repository.Repository) {
} }
} }
func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *backend.ID) *restic.Snapshot { func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
sn, _, err := arch.Snapshot(nil, []string{path}, parent) sn, _, err := arch.Snapshot(nil, []string{path}, parent)
OK(t, err) OK(t, err)
return sn return sn

View File

@ -29,7 +29,7 @@ type fakeFileSystem struct {
// IDs is returned. // IDs is returned.
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
blobs = IDs{} blobs = IDs{}
ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial()) ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
for { for {
chunk, err := ch.Next(getBuf()) chunk, err := ch.Next(getBuf())

View File

@ -97,7 +97,7 @@ func TestLoadTree(t *testing.T) {
// save tree // save tree
tree := restic.NewTree() tree := restic.NewTree()
id, err := repo.SaveJSON(TreeBlob, tree) id, err := repo.SaveJSON(restic.TreeBlob, tree)
OK(t, err) OK(t, err)
// save packs // save packs

View File

@ -1,20 +1,21 @@
package types package types
import ( import (
"restic"
"restic/backend" "restic/backend"
"restic/pack" "restic/pack"
) )
// Repository manages encrypted and packed data stored in a backend. // Repository manages encrypted and packed data stored in a backend.
type Repository interface { type Repository interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error LoadJSONUnpacked(restic.FileType, backend.ID, interface{}) error
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) SaveJSONUnpacked(restic.FileType, interface{}) (backend.ID, error)
Lister Lister
} }
// Lister combines listing packs in a repo and blobs in a pack. // Lister combines listing packs in a repo and blobs in a pack.
type Lister interface { type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID List(restic.FileType, <-chan struct{}) <-chan backend.ID
ListPack(backend.ID) ([]pack.Blob, int64, error) ListPack(backend.ID) ([]pack.Blob, int64, error)
} }
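
The Lister interface combines the done-channel listing pattern with per-pack blob listing. A sketch of draining it, where repo is any value satisfying Lister:

    done := make(chan struct{})
    defer close(done)

    for id := range repo.List(restic.DataFile, done) {
        blobs, size, err := repo.ListPack(id)
        if err != nil {
            return err
        }
        fmt.Printf("pack %v: %d blobs, %d bytes\n", id.Str(), len(blobs), size)
    }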

View File

@ -8,7 +8,7 @@ import (
"time" "time"
"restic" "restic"
"restic/backend" "restic/archiver"
"restic/pipe" "restic/pipe"
"restic/repository" "restic/repository"
. "restic/test" . "restic/test"
@ -22,7 +22,7 @@ func TestWalkTree(t *testing.T) {
OK(t, err) OK(t, err)
// archive a few files // archive a few files
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
sn, _, err := arch.Snapshot(nil, dirs, nil) sn, _, err := arch.Snapshot(nil, dirs, nil)
OK(t, err) OK(t, err)
@ -94,7 +94,7 @@ type delayRepo struct {
delay time.Duration delay time.Duration
} }
func (d delayRepo) LoadJSONPack(t BlobType, id backend.ID, dst interface{}) error { func (d delayRepo) LoadJSONPack(t restic.BlobType, id restic.ID, dst interface{}) error {
time.Sleep(d.delay) time.Sleep(d.delay)
return d.repo.LoadJSONPack(t, id, dst) return d.repo.LoadJSONPack(t, id, dst)
} }
@ -1344,7 +1344,7 @@ func TestDelayedWalkTree(t *testing.T) {
repo := OpenLocalRepo(t, repodir) repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex()) OK(t, repo.LoadIndex())
root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
OK(t, err) OK(t, err)
dr := delayRepo{repo, 100 * time.Millisecond} dr := delayRepo{repo, 100 * time.Millisecond}
@ -1373,7 +1373,7 @@ func BenchmarkDelayedWalkTree(t *testing.B) {
repo := OpenLocalRepo(t, repodir) repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex()) OK(t, repo.LoadIndex())
root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
OK(t, err) OK(t, err)
dr := delayRepo{repo, 10 * time.Millisecond} dr := delayRepo{repo, 10 * time.Millisecond}