mirror of https://github.com/restic/restic.git
Compare commits
50 Commits
e49d788836
...
09d2eabb94
Author | SHA1 | Date |
---|---|---|
Nick Cao | 09d2eabb94 | |
Michael Eischer | faffd15d13 | |
Michael Eischer | 347e9d0765 | |
Altan Orhon | 871ea1eaf3 | |
Michael Eischer | a7b5e09902 | |
Michael Eischer | 3f9d50865d | |
Michael Eischer | 5f263752d7 | |
Michael Eischer | 484dbb1cf4 | |
Michael Eischer | 940a3159b5 | |
Michael Eischer | 31624aeffd | |
Michael Eischer | 910927670f | |
Michael Eischer | 6f2a4dea21 | |
Michael Eischer | 699ef5e9de | |
Michael Eischer | eb710a28e8 | |
Michael Eischer | 86c7909f41 | |
Michael Eischer | 93135dc705 | |
Michael Eischer | 21a7cb405c | |
Michael Eischer | b15d867414 | |
Michael Eischer | 2e6c43c695 | |
Michael Eischer | f7632de3d6 | |
Michael Eischer | 6c6dceade3 | |
Michael Eischer | 10355c3fb6 | |
Michael Eischer | 228b35f074 | |
will-ca | 6aced61c72 | |
Michael Eischer | 4d22412e0c | |
coderwander | a82ed71de7 | |
Michael Eischer | 2173c69280 | |
Michael Eischer | 001bb71676 | |
Michael Eischer | c9191ea72c | |
Michael Eischer | 09587e6c08 | |
Michael Eischer | defd7ae729 | |
Michael Eischer | 038586dc9d | |
Michael Eischer | d8622c86eb | |
Michael Eischer | 8d507c1372 | |
Michael Eischer | 310db03c0e | |
Michael Eischer | 7d1b9cde34 | |
Michael Eischer | b25fc2c89d | |
Michael Eischer | c65459cd8a | |
Michael Eischer | eda9f7beb4 | |
Michael Eischer | 35277b7797 | |
Michael Eischer | 7ba5e95a82 | |
Michael Eischer | 4c9a10ca37 | |
Michael Eischer | 85e4021619 | |
Michael Eischer | fc3b548625 | |
Michael Eischer | df9d4b455d | |
Michael Eischer | 866ddf5698 | |
Michael Eischer | 32a234b67e | |
Michael Eischer | 739d11c2eb | |
Michael Eischer | 0747cf5319 | |
Nick Cao | c074dd4692 |
|
@ -0,0 +1,10 @@
|
||||||
|
Enhancement: Allow specifying `--host` via environment variable
|
||||||
|
|
||||||
|
Restic commands that operate on snapshots, such as `restic backup` and
|
||||||
|
`restic snapshots`, support the `--host` flag to specify the hostname for
|
||||||
|
grouping snapshots. They now permit selecting the hostname via the
|
||||||
|
environment variable `RESTIC_HOST`. `--host` still takes precedence over the
|
||||||
|
environment variable.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4733
|
||||||
|
https://github.com/restic/restic/pull/4734
|
|
@ -0,0 +1,8 @@
|
||||||
|
Bugfix: Fix possible error on concurrent cache cleanup
|
||||||
|
|
||||||
|
If multiple restic processes concurrently cleaned up no longer existing files
|
||||||
|
from the cache, this could cause some of the processes to fail with a `no such
|
||||||
|
file or directory` error. This has been fixed.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4760
|
||||||
|
https://github.com/restic/restic/pull/4761
|
|
@ -0,0 +1,9 @@
|
||||||
|
Enhancement: Allow customizing MaxConcurrentRequestsPerFile and MaxPacket parameters in the sftp backend
|
||||||
|
|
||||||
|
SFTP over long fat links suffers from poor performance due to its overly small
|
||||||
|
default max payload size. But implementations such as OpenSSH tend to
|
||||||
|
accept vastly bigger packets, which improves backup performance for this
|
||||||
|
kind of situation. Restic now allows customizing the max packet size and
|
||||||
|
max concurrent requests per file parameters in the sftp backend.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4445
|
|
@ -1,89 +1,41 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"sync"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/debug"
|
"github.com/restic/restic/internal/debug"
|
||||||
)
|
)
|
||||||
|
|
||||||
var cleanupHandlers struct {
|
func createGlobalContext() context.Context {
|
||||||
sync.Mutex
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
list []func(code int) (int, error)
|
|
||||||
done bool
|
ch := make(chan os.Signal, 1)
|
||||||
ch chan os.Signal
|
go cleanupHandler(ch, cancel)
|
||||||
|
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
|
return ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
// cleanupHandler handles the SIGINT and SIGTERM signals.
|
||||||
cleanupHandlers.ch = make(chan os.Signal, 1)
|
func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) {
|
||||||
go CleanupHandler(cleanupHandlers.ch)
|
s := <-c
|
||||||
signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM)
|
debug.Log("signal %v received, cleaning up", s)
|
||||||
}
|
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
|
||||||
|
|
||||||
// AddCleanupHandler adds the function f to the list of cleanup handlers so
|
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
|
||||||
// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
|
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
|
||||||
// is received.
|
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
|
||||||
func AddCleanupHandler(f func(code int) (int, error)) {
|
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
|
||||||
cleanupHandlers.Lock()
|
|
||||||
defer cleanupHandlers.Unlock()
|
|
||||||
|
|
||||||
// reset the done flag for integration tests
|
|
||||||
cleanupHandlers.done = false
|
|
||||||
|
|
||||||
cleanupHandlers.list = append(cleanupHandlers.list, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunCleanupHandlers runs all registered cleanup handlers
|
|
||||||
func RunCleanupHandlers(code int) int {
|
|
||||||
cleanupHandlers.Lock()
|
|
||||||
defer cleanupHandlers.Unlock()
|
|
||||||
|
|
||||||
if cleanupHandlers.done {
|
|
||||||
return code
|
|
||||||
}
|
}
|
||||||
cleanupHandlers.done = true
|
|
||||||
|
|
||||||
for _, f := range cleanupHandlers.list {
|
cancel()
|
||||||
var err error
|
|
||||||
code, err = f(code)
|
|
||||||
if err != nil {
|
|
||||||
Warnf("error in cleanup handler: %v\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cleanupHandlers.list = nil
|
|
||||||
return code
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CleanupHandler handles the SIGINT and SIGTERM signals.
|
// Exit terminates the process with the given exit code.
|
||||||
func CleanupHandler(c <-chan os.Signal) {
|
|
||||||
for s := range c {
|
|
||||||
debug.Log("signal %v received, cleaning up", s)
|
|
||||||
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
|
|
||||||
|
|
||||||
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
|
|
||||||
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
|
|
||||||
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
|
|
||||||
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
code := 0
|
|
||||||
|
|
||||||
if s == syscall.SIGINT || s == syscall.SIGTERM {
|
|
||||||
code = 130
|
|
||||||
} else {
|
|
||||||
code = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
Exit(code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exit runs the cleanup handlers and then terminates the process with the
|
|
||||||
// given exit code.
|
|
||||||
func Exit(code int) {
|
func Exit(code int) {
|
||||||
code = RunCleanupHandlers(code)
|
|
||||||
debug.Log("exiting with status code %d", code)
|
debug.Log("exiting with status code %d", code)
|
||||||
os.Exit(code)
|
os.Exit(code)
|
||||||
}
|
}
|
||||||
|
|
|
@ -114,7 +114,7 @@ func init() {
|
||||||
f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout")
|
f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout")
|
||||||
f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
|
f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
|
||||||
f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
|
f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
|
||||||
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
|
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the \"parent\" flag")
|
||||||
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
|
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
|
||||||
err := f.MarkDeprecated("hostname", "use --host")
|
err := f.MarkDeprecated("hostname", "use --host")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -137,6 +137,11 @@ func init() {
|
||||||
// parse read concurrency from env, on error the default value will be used
|
// parse read concurrency from env, on error the default value will be used
|
||||||
readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32)
|
readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32)
|
||||||
backupOptions.ReadConcurrency = uint(readConcurrency)
|
backupOptions.ReadConcurrency = uint(readConcurrency)
|
||||||
|
|
||||||
|
// parse host from env, if not exists or empty the default value will be used
|
||||||
|
if host := os.Getenv("RESTIC_HOST"); host != "" {
|
||||||
|
backupOptions.Host = host
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// filterExisting returns a slice of all existing items, or an error if no
|
// filterExisting returns a slice of all existing items, or an error if no
|
||||||
|
|
|
@ -199,10 +199,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
||||||
}
|
}
|
||||||
|
|
||||||
cleanup := prepareCheckCache(opts, &gopts)
|
cleanup := prepareCheckCache(opts, &gopts)
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
defer cleanup()
|
||||||
cleanup()
|
|
||||||
return code, nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
Verbosef("create exclusive lock for repository\n")
|
Verbosef("create exclusive lock for repository\n")
|
||||||
|
@ -222,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
||||||
Verbosef("load indexes\n")
|
Verbosef("load indexes\n")
|
||||||
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
||||||
hints, errs := chkr.LoadIndex(ctx, bar)
|
hints, errs := chkr.LoadIndex(ctx, bar)
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
errorsFound := false
|
errorsFound := false
|
||||||
suggestIndexRebuild := false
|
suggestIndexRebuild := false
|
||||||
|
@ -283,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
||||||
if orphanedPacks > 0 {
|
if orphanedPacks > 0 {
|
||||||
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
|
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
Verbosef("check snapshots, trees and blobs\n")
|
Verbosef("check snapshots, trees and blobs\n")
|
||||||
errChan = make(chan error)
|
errChan = make(chan error)
|
||||||
|
@ -316,9 +319,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
||||||
// Must happen after `errChan` is read from in the above loop to avoid
|
// Must happen after `errChan` is read from in the above loop to avoid
|
||||||
// deadlocking in the case of errors.
|
// deadlocking in the case of errors.
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
if opts.CheckUnused {
|
if opts.CheckUnused {
|
||||||
for _, id := range chkr.UnusedBlobs(ctx) {
|
unused, err := chkr.UnusedBlobs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, id := range unused {
|
||||||
Verbosef("unused blob %v\n", id)
|
Verbosef("unused blob %v\n", id)
|
||||||
errorsFound = true
|
errorsFound = true
|
||||||
}
|
}
|
||||||
|
@ -395,10 +405,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
||||||
doReadData(packs)
|
doReadData(packs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
if errorsFound {
|
if errorsFound {
|
||||||
return errors.Fatal("repository contains errors")
|
return errors.Fatal("repository contains errors")
|
||||||
}
|
}
|
||||||
|
|
||||||
Verbosef("no errors were found\n")
|
Verbosef("no errors were found\n")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -53,7 +53,7 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
|
func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
|
||||||
secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination")
|
secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
|
||||||
// also consider identical snapshot copies
|
// also consider identical snapshot copies
|
||||||
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
|
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
// remember already processed trees across all snapshots
|
// remember already processed trees across all snapshots
|
||||||
visitedTrees := restic.NewIDSet()
|
visitedTrees := restic.NewIDSet()
|
||||||
|
@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
|
||||||
}
|
}
|
||||||
Verbosef("snapshot %s saved\n", newID.Str())
|
Verbosef("snapshot %s saved\n", newID.Str())
|
||||||
}
|
}
|
||||||
return nil
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {
|
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {
|
||||||
|
|
|
@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
|
||||||
|
|
||||||
if err != errAllPacksFound {
|
if err != errAllPacksFound {
|
||||||
// try to resolve unknown pack ids from the index
|
// try to resolve unknown pack ids from the index
|
||||||
packIDs = f.indexPacksToBlobs(ctx, packIDs)
|
packIDs, err = f.indexPacksToBlobs(ctx, packIDs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(packIDs) > 0 {
|
if len(packIDs) > 0 {
|
||||||
|
@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} {
|
func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) {
|
||||||
wctx, cancel := context.WithCancel(ctx)
|
wctx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// remember which packs were found in the index
|
// remember which packs were found in the index
|
||||||
indexPackIDs := make(map[string]struct{})
|
indexPackIDs := make(map[string]struct{})
|
||||||
f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
|
err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
|
||||||
idStr := pb.PackID.String()
|
idStr := pb.PackID.String()
|
||||||
// keep entry in packIDs as Each() returns individual index entries
|
// keep entry in packIDs as Each() returns individual index entries
|
||||||
matchingID := false
|
matchingID := false
|
||||||
|
@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
|
||||||
indexPackIDs[idStr] = struct{}{}
|
indexPackIDs[idStr] = struct{}{}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
for id := range indexPackIDs {
|
for id := range indexPackIDs {
|
||||||
delete(packIDs, id)
|
delete(packIDs, id)
|
||||||
|
@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
|
||||||
}
|
}
|
||||||
Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list)
|
Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list)
|
||||||
}
|
}
|
||||||
return packIDs
|
return packIDs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Finder) findObjectPack(id string, t restic.BlobType) {
|
func (f *Finder) findObjectPack(id string, t restic.BlobType) {
|
||||||
|
@ -608,6 +614,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
|
||||||
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
|
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
|
||||||
filteredSnapshots = append(filteredSnapshots, sn)
|
filteredSnapshots = append(filteredSnapshots, sn)
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
sort.Slice(filteredSnapshots, func(i, j int) bool {
|
sort.Slice(filteredSnapshots, func(i, j int) bool {
|
||||||
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
|
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
|
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -33,7 +34,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args)
|
term, cancel := setupTermstatus()
|
||||||
|
defer cancel()
|
||||||
|
return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,7 +155,7 @@ func verifyForgetOptions(opts *ForgetOptions) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error {
|
func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
|
||||||
err := verifyForgetOptions(&opts)
|
err := verifyForgetOptions(&opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -173,12 +176,21 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
|
||||||
}
|
}
|
||||||
defer unlock()
|
defer unlock()
|
||||||
|
|
||||||
|
verbosity := gopts.verbosity
|
||||||
|
if gopts.JSON {
|
||||||
|
verbosity = 0
|
||||||
|
}
|
||||||
|
printer := newTerminalProgressPrinter(verbosity, term)
|
||||||
|
|
||||||
var snapshots restic.Snapshots
|
var snapshots restic.Snapshots
|
||||||
removeSnIDs := restic.NewIDSet()
|
removeSnIDs := restic.NewIDSet()
|
||||||
|
|
||||||
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
|
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
|
||||||
snapshots = append(snapshots, sn)
|
snapshots = append(snapshots, sn)
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
var jsonGroups []*ForgetGroup
|
var jsonGroups []*ForgetGroup
|
||||||
|
|
||||||
|
@ -210,15 +222,11 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
|
||||||
}
|
}
|
||||||
|
|
||||||
if policy.Empty() && len(args) == 0 {
|
if policy.Empty() && len(args) == 0 {
|
||||||
if !gopts.JSON {
|
printer.P("no policy was specified, no snapshots will be removed\n")
|
||||||
Verbosef("no policy was specified, no snapshots will be removed\n")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if !policy.Empty() {
|
if !policy.Empty() {
|
||||||
if !gopts.JSON {
|
printer.P("Applying Policy: %v\n", policy)
|
||||||
Verbosef("Applying Policy: %v\n", policy)
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, snapshotGroup := range snapshotGroups {
|
for k, snapshotGroup := range snapshotGroups {
|
||||||
if gopts.Verbose >= 1 && !gopts.JSON {
|
if gopts.Verbose >= 1 && !gopts.JSON {
|
||||||
|
@ -241,16 +249,16 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
|
||||||
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
||||||
|
|
||||||
if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
|
if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||||
Printf("keep %d snapshots:\n", len(keep))
|
printer.P("keep %d snapshots:\n", len(keep))
|
||||||
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
||||||
Printf("\n")
|
printer.P("\n")
|
||||||
}
|
}
|
||||||
fg.Keep = asJSONSnapshots(keep)
|
fg.Keep = asJSONSnapshots(keep)
|
||||||
|
|
||||||
if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
|
if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||||
Printf("remove %d snapshots:\n", len(remove))
|
printer.P("remove %d snapshots:\n", len(remove))
|
||||||
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
||||||
Printf("\n")
|
printer.P("\n")
|
||||||
}
|
}
|
||||||
fg.Remove = asJSONSnapshots(remove)
|
fg.Remove = asJSONSnapshots(remove)
|
||||||
|
|
||||||
|
@ -265,16 +273,27 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
if len(removeSnIDs) > 0 {
|
if len(removeSnIDs) > 0 {
|
||||||
if !opts.DryRun {
|
if !opts.DryRun {
|
||||||
err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile)
|
bar := printer.NewCounter("files deleted")
|
||||||
|
err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id)
|
||||||
|
} else {
|
||||||
|
printer.VV("removed %v/%v\n", restic.SnapshotFile, id)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}, bar)
|
||||||
|
bar.Done()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if !gopts.JSON {
|
printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs)
|
||||||
Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -286,15 +305,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(removeSnIDs) > 0 && opts.Prune {
|
if len(removeSnIDs) > 0 && opts.Prune {
|
||||||
if !gopts.JSON {
|
if opts.DryRun {
|
||||||
if opts.DryRun {
|
printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs))
|
||||||
Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs))
|
} else {
|
||||||
} else {
|
printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs))
|
||||||
Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
pruneOptions.DryRun = opts.DryRun
|
pruneOptions.DryRun = opts.DryRun
|
||||||
return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs)
|
return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
|
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||||
|
@ -12,5 +13,7 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||||
pruneOpts := PruneOptions{
|
pruneOpts := PruneOptions{
|
||||||
MaxUnused: "5%",
|
MaxUnused: "5%",
|
||||||
}
|
}
|
||||||
rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args))
|
rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runForget(context.TODO(), opts, pruneOpts, gopts, term, args)
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
|
@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
gopts.password, err = ReadPasswordTwice(gopts,
|
gopts.password, err = ReadPasswordTwice(ctx, gopts,
|
||||||
"enter password for new repository: ",
|
"enter password for new repository: ",
|
||||||
"enter password again: ")
|
"enter password again: ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
||||||
|
|
||||||
func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) {
|
func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) {
|
||||||
if opts.CopyChunkerParameters {
|
if opts.CopyChunkerParameters {
|
||||||
otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary")
|
otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -60,7 +60,7 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg
|
||||||
}
|
}
|
||||||
|
|
||||||
func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error {
|
func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error {
|
||||||
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
|
pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -83,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption
|
||||||
// testKeyNewPassword is used to set a new password during integration testing.
|
// testKeyNewPassword is used to set a new password during integration testing.
|
||||||
var testKeyNewPassword string
|
var testKeyNewPassword string
|
||||||
|
|
||||||
func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) {
|
func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) {
|
||||||
if testKeyNewPassword != "" {
|
if testKeyNewPassword != "" {
|
||||||
return testKeyNewPassword, nil
|
return testKeyNewPassword, nil
|
||||||
}
|
}
|
||||||
|
@ -97,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error)
|
||||||
newopts := gopts
|
newopts := gopts
|
||||||
newopts.password = ""
|
newopts.password = ""
|
||||||
|
|
||||||
return ReadPasswordTwice(newopts,
|
return ReadPasswordTwice(ctx, newopts,
|
||||||
"enter new password: ",
|
"enter new password: ",
|
||||||
"enter password again: ")
|
"enter password again: ")
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,7 +57,7 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption
|
||||||
}
|
}
|
||||||
|
|
||||||
func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error {
|
func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error {
|
||||||
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
|
pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -59,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
idx.Each(ctx, func(blobs restic.PackedBlob) {
|
return idx.Each(ctx, func(blobs restic.PackedBlob) {
|
||||||
Printf("%v %v\n", blobs.Type, blobs.ID)
|
Printf("%v %v\n", blobs.Type, blobs.ID)
|
||||||
})
|
})
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
default:
|
default:
|
||||||
return errors.Fatal("invalid type")
|
return errors.Fatal("invalid type")
|
||||||
|
|
|
@ -152,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
systemFuse.Debug = func(msg interface{}) {
|
||||||
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
|
debug.Log("fuse: %v", msg)
|
||||||
err := umount(mountpoint)
|
}
|
||||||
if err != nil {
|
|
||||||
Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
|
|
||||||
}
|
|
||||||
// replace error code of sigint
|
|
||||||
if code == 130 {
|
|
||||||
code = 0
|
|
||||||
}
|
|
||||||
return code, nil
|
|
||||||
})
|
|
||||||
|
|
||||||
c, err := systemFuse.Mount(mountpoint, mountOptions...)
|
c, err := systemFuse.Mount(mountpoint, mountOptions...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
systemFuse.Debug = func(msg interface{}) {
|
|
||||||
debug.Log("fuse: %v", msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := fuse.Config{
|
cfg := fuse.Config{
|
||||||
OwnerIsRoot: opts.OwnerRoot,
|
OwnerIsRoot: opts.OwnerRoot,
|
||||||
Filter: opts.SnapshotFilter,
|
Filter: opts.SnapshotFilter,
|
||||||
|
@ -187,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
|
||||||
Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n")
|
Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n")
|
||||||
|
|
||||||
debug.Log("serving mount at %v", mountpoint)
|
debug.Log("serving mount at %v", mountpoint)
|
||||||
err = fs.Serve(c, root)
|
|
||||||
if err != nil {
|
done := make(chan struct{})
|
||||||
return err
|
|
||||||
|
go func() {
|
||||||
|
defer close(done)
|
||||||
|
err = fs.Serve(c, root)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
|
||||||
|
err := systemFuse.Unmount(mountpoint)
|
||||||
|
if err != nil {
|
||||||
|
Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ErrOK
|
||||||
|
case <-done:
|
||||||
|
// clean shutdown, nothing to do
|
||||||
}
|
}
|
||||||
|
|
||||||
<-c.Ready
|
return err
|
||||||
return c.MountError
|
|
||||||
}
|
|
||||||
|
|
||||||
func umount(mountpoint string) error {
|
|
||||||
return systemFuse.Unmount(mountpoint)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,6 +12,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
systemFuse "github.com/anacrolix/fuse"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
)
|
)
|
||||||
|
@ -65,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
|
||||||
func testRunUmount(t testing.TB, dir string) {
|
func testRunUmount(t testing.TB, dir string) {
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < mountWait; i++ {
|
for i := 0; i < mountWait; i++ {
|
||||||
if err = umount(dir); err == nil {
|
if err = systemFuse.Unmount(dir); err == nil {
|
||||||
t.Logf("directory %v umounted", dir)
|
t.Logf("directory %v umounted", dir)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,26 +4,20 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"math"
|
"math"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/debug"
|
"github.com/restic/restic/internal/debug"
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/index"
|
|
||||||
"github.com/restic/restic/internal/pack"
|
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
"github.com/restic/restic/internal/ui"
|
"github.com/restic/restic/internal/ui"
|
||||||
"github.com/restic/restic/internal/ui/progress"
|
"github.com/restic/restic/internal/ui/progress"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
var errorIndexIncomplete = errors.Fatal("index is not complete")
|
|
||||||
var errorPacksMissing = errors.Fatal("packs from index missing in repo")
|
|
||||||
var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index")
|
|
||||||
|
|
||||||
var cmdPrune = &cobra.Command{
|
var cmdPrune = &cobra.Command{
|
||||||
Use: "prune [flags]",
|
Use: "prune [flags]",
|
||||||
Short: "Remove unneeded data from the repository",
|
Short: "Remove unneeded data from the repository",
|
||||||
|
@ -38,7 +32,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||||
return runPrune(cmd.Context(), pruneOptions, globalOptions)
|
term, cancel := setupTermstatus()
|
||||||
|
defer cancel()
|
||||||
|
return runPrune(cmd.Context(), pruneOptions, globalOptions, term)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -138,7 +134,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error {
|
func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
|
||||||
err := verifyPruneOptions(&opts)
|
err := verifyPruneOptions(&opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -154,14 +150,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
|
||||||
}
|
}
|
||||||
defer unlock()
|
defer unlock()
|
||||||
|
|
||||||
if repo.Connections() < 2 {
|
|
||||||
return errors.Fatal("prune requires a backend connection limit of at least two")
|
|
||||||
}
|
|
||||||
|
|
||||||
if repo.Config().Version < 2 && opts.RepackUncompressed {
|
|
||||||
return errors.Fatal("compression requires at least repository format version 2")
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.UnsafeNoSpaceRecovery != "" {
|
if opts.UnsafeNoSpaceRecovery != "" {
|
||||||
repoID := repo.Config().ID
|
repoID := repo.Config().ID
|
||||||
if opts.UnsafeNoSpaceRecovery != repoID {
|
if opts.UnsafeNoSpaceRecovery != repoID {
|
||||||
|
@ -170,10 +158,10 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
|
||||||
opts.unsafeRecovery = true
|
opts.unsafeRecovery = true
|
||||||
}
|
}
|
||||||
|
|
||||||
return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet())
|
return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error {
|
func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error {
|
||||||
// we do not need index updates while pruning!
|
// we do not need index updates while pruning!
|
||||||
repo.DisableAutoIndexUpdate()
|
repo.DisableAutoIndexUpdate()
|
||||||
|
|
||||||
|
@ -181,24 +169,43 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
|
||||||
Print("warning: running prune without a cache, this may be very slow!\n")
|
Print("warning: running prune without a cache, this may be very slow!\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
Verbosef("loading indexes...\n")
|
printer := newTerminalProgressPrinter(gopts.verbosity, term)
|
||||||
|
|
||||||
|
printer.P("loading indexes...\n")
|
||||||
// loading the index before the snapshots is ok, as we use an exclusive lock here
|
// loading the index before the snapshots is ok, as we use an exclusive lock here
|
||||||
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
|
||||||
err := repo.LoadIndex(ctx, bar)
|
err := repo.LoadIndex(ctx, bar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet)
|
popts := repository.PruneOptions{
|
||||||
|
DryRun: opts.DryRun,
|
||||||
|
UnsafeRecovery: opts.unsafeRecovery,
|
||||||
|
|
||||||
|
MaxUnusedBytes: opts.maxUnusedBytes,
|
||||||
|
MaxRepackBytes: opts.MaxRepackBytes,
|
||||||
|
|
||||||
|
RepackCachableOnly: opts.RepackCachableOnly,
|
||||||
|
RepackSmall: opts.RepackSmall,
|
||||||
|
RepackUncompressed: opts.RepackUncompressed,
|
||||||
|
}
|
||||||
|
|
||||||
|
plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) {
|
||||||
|
return getUsedBlobs(ctx, repo, ignoreSnapshots, printer)
|
||||||
|
}, printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
if opts.DryRun {
|
return ctx.Err()
|
||||||
Verbosef("\nWould have made the following changes:")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = printPruneStats(stats)
|
if popts.DryRun {
|
||||||
|
printer.P("\nWould have made the following changes:")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = printPruneStats(printer, plan.Stats())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -206,605 +213,54 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
|
||||||
// Trigger GC to reset garbage collection threshold
|
// Trigger GC to reset garbage collection threshold
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
|
|
||||||
return doPrune(ctx, opts, gopts, repo, plan)
|
return plan.Execute(ctx, printer)
|
||||||
}
|
|
||||||
|
|
||||||
type pruneStats struct {
|
|
||||||
blobs struct {
|
|
||||||
used uint
|
|
||||||
duplicate uint
|
|
||||||
unused uint
|
|
||||||
remove uint
|
|
||||||
repack uint
|
|
||||||
repackrm uint
|
|
||||||
}
|
|
||||||
size struct {
|
|
||||||
used uint64
|
|
||||||
duplicate uint64
|
|
||||||
unused uint64
|
|
||||||
remove uint64
|
|
||||||
repack uint64
|
|
||||||
repackrm uint64
|
|
||||||
unref uint64
|
|
||||||
uncompressed uint64
|
|
||||||
}
|
|
||||||
packs struct {
|
|
||||||
used uint
|
|
||||||
unused uint
|
|
||||||
partlyUsed uint
|
|
||||||
unref uint
|
|
||||||
keep uint
|
|
||||||
repack uint
|
|
||||||
remove uint
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type prunePlan struct {
|
|
||||||
removePacksFirst restic.IDSet // packs to remove first (unreferenced packs)
|
|
||||||
repackPacks restic.IDSet // packs to repack
|
|
||||||
keepBlobs restic.CountedBlobSet // blobs to keep during repacking
|
|
||||||
removePacks restic.IDSet // packs to remove
|
|
||||||
ignorePacks restic.IDSet // packs to ignore when rebuilding the index
|
|
||||||
}
|
|
||||||
|
|
||||||
type packInfo struct {
|
|
||||||
usedBlobs uint
|
|
||||||
unusedBlobs uint
|
|
||||||
usedSize uint64
|
|
||||||
unusedSize uint64
|
|
||||||
tpe restic.BlobType
|
|
||||||
uncompressed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type packInfoWithID struct {
|
|
||||||
ID restic.ID
|
|
||||||
packInfo
|
|
||||||
mustCompress bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// planPrune selects which files to rewrite and which to delete and which blobs to keep.
|
|
||||||
// Also some summary statistics are returned.
|
|
||||||
func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) {
|
|
||||||
var stats pruneStats
|
|
||||||
|
|
||||||
usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet)
|
|
||||||
if err != nil {
|
|
||||||
return prunePlan{}, stats, err
|
|
||||||
}
|
|
||||||
|
|
||||||
Verbosef("searching used packs...\n")
|
|
||||||
keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats)
|
|
||||||
if err != nil {
|
|
||||||
return prunePlan{}, stats, err
|
|
||||||
}
|
|
||||||
|
|
||||||
Verbosef("collecting packs for deletion and repacking\n")
|
|
||||||
plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet)
|
|
||||||
if err != nil {
|
|
||||||
return prunePlan{}, stats, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(plan.repackPacks) != 0 {
|
|
||||||
blobCount := keepBlobs.Len()
|
|
||||||
// when repacking, we do not want to keep blobs which are
|
|
||||||
// already contained in kept packs, so delete them from keepBlobs
|
|
||||||
repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
|
||||||
if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keepBlobs.Delete(blob.BlobHandle)
|
|
||||||
})
|
|
||||||
|
|
||||||
if keepBlobs.Len() < blobCount/2 {
|
|
||||||
// replace with copy to shrink map to necessary size if there's a chance to benefit
|
|
||||||
keepBlobs = keepBlobs.Copy()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// keepBlobs is only needed if packs are repacked
|
|
||||||
keepBlobs = nil
|
|
||||||
}
|
|
||||||
plan.keepBlobs = keepBlobs
|
|
||||||
|
|
||||||
return plan, stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) {
|
|
||||||
// iterate over all blobs in index to find out which blobs are duplicates
|
|
||||||
// The counter in usedBlobs describes how many instances of the blob exist in the repository index
|
|
||||||
// Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist
|
|
||||||
idx.Each(ctx, func(blob restic.PackedBlob) {
|
|
||||||
bh := blob.BlobHandle
|
|
||||||
count, ok := usedBlobs[bh]
|
|
||||||
if ok {
|
|
||||||
if count < math.MaxUint8 {
|
|
||||||
// don't overflow, but saturate count at 255
|
|
||||||
// this can lead to a non-optimal pack selection, but won't cause
|
|
||||||
// problems otherwise
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
|
|
||||||
usedBlobs[bh] = count
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Check if all used blobs have been found in index
|
|
||||||
missingBlobs := restic.NewBlobSet()
|
|
||||||
for bh, count := range usedBlobs {
|
|
||||||
if count == 0 {
|
|
||||||
// blob does not exist in any pack files
|
|
||||||
missingBlobs.Insert(bh)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(missingBlobs) != 0 {
|
|
||||||
Warnf("%v not found in the index\n\n"+
|
|
||||||
"Integrity check failed: Data seems to be missing.\n"+
|
|
||||||
"Will not start prune to prevent (additional) data loss!\n"+
|
|
||||||
"Please report this error (along with the output of the 'prune' run) at\n"+
|
|
||||||
"https://github.com/restic/restic/issues/new/choose\n", missingBlobs)
|
|
||||||
return nil, nil, errorIndexIncomplete
|
|
||||||
}
|
|
||||||
|
|
||||||
indexPack := make(map[restic.ID]packInfo)
|
|
||||||
|
|
||||||
// save computed pack header size
|
|
||||||
for pid, hdrSize := range pack.Size(ctx, idx, true) {
|
|
||||||
// initialize tpe with NumBlobTypes to indicate it's not set
|
|
||||||
indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)}
|
|
||||||
}
|
|
||||||
|
|
||||||
hasDuplicates := false
|
|
||||||
// iterate over all blobs in index to generate packInfo
|
|
||||||
idx.Each(ctx, func(blob restic.PackedBlob) {
|
|
||||||
ip := indexPack[blob.PackID]
|
|
||||||
|
|
||||||
// Set blob type if not yet set
|
|
||||||
if ip.tpe == restic.NumBlobTypes {
|
|
||||||
ip.tpe = blob.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
// mark mixed packs with "Invalid blob type"
|
|
||||||
if ip.tpe != blob.Type {
|
|
||||||
ip.tpe = restic.InvalidBlob
|
|
||||||
}
|
|
||||||
|
|
||||||
bh := blob.BlobHandle
|
|
||||||
size := uint64(blob.Length)
|
|
||||||
dupCount := usedBlobs[bh]
|
|
||||||
switch {
|
|
||||||
case dupCount >= 2:
|
|
||||||
hasDuplicates = true
|
|
||||||
// mark as unused for now, we will later on select one copy
|
|
||||||
ip.unusedSize += size
|
|
||||||
ip.unusedBlobs++
|
|
||||||
|
|
||||||
// count as duplicate, will later on change one copy to be counted as used
|
|
||||||
stats.size.duplicate += size
|
|
||||||
stats.blobs.duplicate++
|
|
||||||
case dupCount == 1: // used blob, not duplicate
|
|
||||||
ip.usedSize += size
|
|
||||||
ip.usedBlobs++
|
|
||||||
|
|
||||||
stats.size.used += size
|
|
||||||
stats.blobs.used++
|
|
||||||
default: // unused blob
|
|
||||||
ip.unusedSize += size
|
|
||||||
ip.unusedBlobs++
|
|
||||||
|
|
||||||
stats.size.unused += size
|
|
||||||
stats.blobs.unused++
|
|
||||||
}
|
|
||||||
if !blob.IsCompressed() {
|
|
||||||
ip.uncompressed = true
|
|
||||||
}
|
|
||||||
// update indexPack
|
|
||||||
indexPack[blob.PackID] = ip
|
|
||||||
})
|
|
||||||
|
|
||||||
// if duplicate blobs exist, those will be set to either "used" or "unused":
|
|
||||||
// - mark only one occurrence of duplicate blobs as used
|
|
||||||
// - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used"
|
|
||||||
// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
|
|
||||||
if hasDuplicates {
|
|
||||||
// iterate again over all blobs in index (this is pretty cheap, all in-mem)
|
|
||||||
idx.Each(ctx, func(blob restic.PackedBlob) {
|
|
||||||
bh := blob.BlobHandle
|
|
||||||
count, ok := usedBlobs[bh]
|
|
||||||
// skip non-duplicate, aka. normal blobs
|
|
||||||
// count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining
|
|
||||||
if !ok || count == 1 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := indexPack[blob.PackID]
|
|
||||||
size := uint64(blob.Length)
|
|
||||||
switch {
|
|
||||||
case ip.usedBlobs > 0, count == 0:
|
|
||||||
// other used blobs in pack or "last" occurrence -> transition to used
|
|
||||||
ip.usedSize += size
|
|
||||||
ip.usedBlobs++
|
|
||||||
ip.unusedSize -= size
|
|
||||||
ip.unusedBlobs--
|
|
||||||
// same for the global statistics
|
|
||||||
stats.size.used += size
|
|
||||||
stats.blobs.used++
|
|
||||||
stats.size.duplicate -= size
|
|
||||||
stats.blobs.duplicate--
|
|
||||||
// let other occurrences remain marked as unused
|
|
||||||
usedBlobs[bh] = 1
|
|
||||||
default:
|
|
||||||
// remain unused and decrease counter
|
|
||||||
count--
|
|
||||||
if count == 1 {
|
|
||||||
// setting count to 1 would lead to forgetting that this blob had duplicates
|
|
||||||
// thus use the special value zero. This will select the last instance of the blob for keeping.
|
|
||||||
count = 0
|
|
||||||
}
|
|
||||||
usedBlobs[bh] = count
|
|
||||||
}
|
|
||||||
// update indexPack
|
|
||||||
indexPack[blob.PackID] = ip
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanity check. If no duplicates exist, all blobs have value 1. After handling
|
|
||||||
// duplicates, this also applies to duplicates.
|
|
||||||
for _, count := range usedBlobs {
|
|
||||||
if count != 1 {
|
|
||||||
panic("internal error during blob selection")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return usedBlobs, indexPack, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) {
|
|
||||||
removePacksFirst := restic.NewIDSet()
|
|
||||||
removePacks := restic.NewIDSet()
|
|
||||||
repackPacks := restic.NewIDSet()
|
|
||||||
|
|
||||||
var repackCandidates []packInfoWithID
|
|
||||||
var repackSmallCandidates []packInfoWithID
|
|
||||||
repoVersion := repo.Config().Version
|
|
||||||
// only repack very small files by default
|
|
||||||
targetPackSize := repo.PackSize() / 25
|
|
||||||
if opts.RepackSmall {
|
|
||||||
// consider files with at least 80% of the target size as large enough
|
|
||||||
targetPackSize = repo.PackSize() / 5 * 4
|
|
||||||
}
|
|
||||||
|
|
||||||
// loop over all packs and decide what to do
|
|
||||||
bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed")
|
|
||||||
err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
|
|
||||||
p, ok := indexPack[id]
|
|
||||||
if !ok {
|
|
||||||
// Pack was not referenced in index and is not used => immediately remove!
|
|
||||||
Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str())
|
|
||||||
removePacksFirst.Insert(id)
|
|
||||||
stats.size.unref += uint64(packSize)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 {
|
|
||||||
// Pack size does not fit and pack is needed => error
|
|
||||||
// If the pack is not needed, this is no error, the pack can
|
|
||||||
// and will be simply removed, see below.
|
|
||||||
Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
|
|
||||||
id.Str(), p.unusedSize+p.usedSize, packSize)
|
|
||||||
return errorSizeNotMatching
|
|
||||||
}
|
|
||||||
|
|
||||||
// statistics
|
|
||||||
switch {
|
|
||||||
case p.usedBlobs == 0:
|
|
||||||
stats.packs.unused++
|
|
||||||
case p.unusedBlobs == 0:
|
|
||||||
stats.packs.used++
|
|
||||||
default:
|
|
||||||
stats.packs.partlyUsed++
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.uncompressed {
|
|
||||||
stats.size.uncompressed += p.unusedSize + p.usedSize
|
|
||||||
}
|
|
||||||
mustCompress := false
|
|
||||||
if repoVersion >= 2 {
|
|
||||||
// repo v2: always repack tree blobs if uncompressed
|
|
||||||
// compress data blobs if requested
|
|
||||||
mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
|
|
||||||
}
|
|
||||||
|
|
||||||
// decide what to do
|
|
||||||
switch {
|
|
||||||
case p.usedBlobs == 0:
|
|
||||||
// All blobs in pack are no longer used => remove pack!
|
|
||||||
removePacks.Insert(id)
|
|
||||||
stats.blobs.remove += p.unusedBlobs
|
|
||||||
stats.size.remove += p.unusedSize
|
|
||||||
|
|
||||||
case opts.RepackCachableOnly && p.tpe == restic.DataBlob:
|
|
||||||
// if this is a data pack and --repack-cacheable-only is set => keep pack!
|
|
||||||
stats.packs.keep++
|
|
||||||
|
|
||||||
case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
|
|
||||||
if packSize >= int64(targetPackSize) {
|
|
||||||
// All blobs in pack are used and not mixed => keep pack!
|
|
||||||
stats.packs.keep++
|
|
||||||
} else {
|
|
||||||
repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
// all other packs are candidates for repacking
|
|
||||||
repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(indexPack, id)
|
|
||||||
bar.Add(1)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
bar.Done()
|
|
||||||
if err != nil {
|
|
||||||
return prunePlan{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point indexPacks contains only missing packs!
|
|
||||||
|
|
||||||
// missing packs that are not needed can be ignored
|
|
||||||
ignorePacks := restic.NewIDSet()
|
|
||||||
for id, p := range indexPack {
|
|
||||||
if p.usedBlobs == 0 {
|
|
||||||
ignorePacks.Insert(id)
|
|
||||||
stats.blobs.remove += p.unusedBlobs
|
|
||||||
stats.size.remove += p.unusedSize
|
|
||||||
delete(indexPack, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(indexPack) != 0 {
|
|
||||||
Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack))
|
|
||||||
for id := range indexPack {
|
|
||||||
Warnf(" %v\n", id)
|
|
||||||
}
|
|
||||||
return prunePlan{}, errorPacksMissing
|
|
||||||
}
|
|
||||||
if len(ignorePacks) != 0 {
|
|
||||||
Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n")
|
|
||||||
for id := range ignorePacks {
|
|
||||||
Warnf("will forget missing pack file %v\n", id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(repackSmallCandidates) < 10 {
|
|
||||||
// too few small files to be worth the trouble, this also prevents endlessly repacking
|
|
||||||
// if there is just a single pack file below the target size
|
|
||||||
stats.packs.keep += uint(len(repackSmallCandidates))
|
|
||||||
} else {
|
|
||||||
repackCandidates = append(repackCandidates, repackSmallCandidates...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort repackCandidates such that packs with highest ratio unused/used space are picked first.
|
|
||||||
// This is equivalent to sorting by unused / total space.
|
|
||||||
// Instead of unused[i] / used[i] > unused[j] / used[j] we use
|
|
||||||
// unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64
|
|
||||||
// Moreover packs containing trees and too small packs are sorted to the beginning
|
|
||||||
sort.Slice(repackCandidates, func(i, j int) bool {
|
|
||||||
pi := repackCandidates[i].packInfo
|
|
||||||
pj := repackCandidates[j].packInfo
|
|
||||||
switch {
|
|
||||||
case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob:
|
|
||||||
return true
|
|
||||||
case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob:
|
|
||||||
return false
|
|
||||||
case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize):
|
|
||||||
return true
|
|
||||||
case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize):
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize
|
|
||||||
})
|
|
||||||
|
|
||||||
repack := func(id restic.ID, p packInfo) {
|
|
||||||
repackPacks.Insert(id)
|
|
||||||
stats.blobs.repack += p.unusedBlobs + p.usedBlobs
|
|
||||||
stats.size.repack += p.unusedSize + p.usedSize
|
|
||||||
stats.blobs.repackrm += p.unusedBlobs
|
|
||||||
stats.size.repackrm += p.unusedSize
|
|
||||||
if p.uncompressed {
|
|
||||||
stats.size.uncompressed -= p.unusedSize + p.usedSize
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// calculate limit for number of unused bytes in the repo after repacking
|
|
||||||
maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used)
|
|
||||||
|
|
||||||
for _, p := range repackCandidates {
|
|
||||||
reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter)
|
|
||||||
reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes
|
|
||||||
packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize)
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case reachedRepackSize:
|
|
||||||
stats.packs.keep++
|
|
||||||
|
|
||||||
case p.tpe != restic.DataBlob, p.mustCompress:
|
|
||||||
// repacking non-data packs / uncompressed-trees is only limited by repackSize
|
|
||||||
repack(p.ID, p.packInfo)
|
|
||||||
|
|
||||||
case reachedUnusedSizeAfter && packIsLargeEnough:
|
|
||||||
// for all other packs stop repacking if tolerated unused size is reached.
|
|
||||||
stats.packs.keep++
|
|
||||||
|
|
||||||
default:
|
|
||||||
repack(p.ID, p.packInfo)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stats.packs.unref = uint(len(removePacksFirst))
|
|
||||||
stats.packs.repack = uint(len(repackPacks))
|
|
||||||
stats.packs.remove = uint(len(removePacks))
|
|
||||||
|
|
||||||
if repo.Config().Version < 2 {
|
|
||||||
// compression not supported for repository format version 1
|
|
||||||
stats.size.uncompressed = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return prunePlan{removePacksFirst: removePacksFirst,
|
|
||||||
removePacks: removePacks,
|
|
||||||
repackPacks: repackPacks,
|
|
||||||
ignorePacks: ignorePacks,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// printPruneStats prints out the statistics
|
// printPruneStats prints out the statistics
|
||||||
func printPruneStats(stats pruneStats) error {
|
func printPruneStats(printer progress.Printer, stats repository.PruneStats) error {
|
||||||
Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used))
|
printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used))
|
||||||
if stats.blobs.duplicate > 0 {
|
if stats.Blobs.Duplicate > 0 {
|
||||||
Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate))
|
printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate))
|
||||||
}
|
}
|
||||||
Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused))
|
printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused))
|
||||||
if stats.size.unref > 0 {
|
if stats.Size.Unref > 0 {
|
||||||
Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref))
|
printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref))
|
||||||
}
|
}
|
||||||
totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate
|
totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate
|
||||||
totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref
|
totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref
|
||||||
unusedSize := stats.size.duplicate + stats.size.unused
|
unusedSize := stats.Size.Duplicate + stats.Size.Unused
|
||||||
Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize))
|
printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize))
|
||||||
Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize))
|
printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize))
|
||||||
|
|
||||||
Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack))
|
printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack))
|
||||||
Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm))
|
printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, ui.FormatBytes(stats.Size.Repackrm))
|
||||||
Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref))
|
printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref))
|
||||||
totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref
|
totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref
|
||||||
Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize))
|
printer.P("total prune: %10d blobs / %s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize))
|
||||||
if stats.size.uncompressed > 0 {
|
if stats.Size.Uncompressed > 0 {
|
||||||
Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed))
|
printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed))
|
||||||
}
|
}
|
||||||
Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize))
|
printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize))
|
||||||
unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm
|
unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm
|
||||||
Verbosef("unused size after prune: %s (%s of remaining size)\n",
|
printer.P("unused size after prune: %s (%s of remaining size)\n",
|
||||||
ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize))
|
ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize))
|
||||||
Verbosef("\n")
|
printer.P("\n")
|
||||||
Verboseff("totally used packs: %10d\n", stats.packs.used)
|
printer.V("totally used packs: %10d\n", stats.Packs.Used)
|
||||||
Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed)
|
printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed)
|
||||||
Verboseff("unused packs: %10d\n\n", stats.packs.unused)
|
printer.V("unused packs: %10d\n\n", stats.Packs.Unused)
|
||||||
|
|
||||||
Verboseff("to keep: %10d packs\n", stats.packs.keep)
|
printer.V("to keep: %10d packs\n", stats.Packs.Keep)
|
||||||
Verboseff("to repack: %10d packs\n", stats.packs.repack)
|
printer.V("to repack: %10d packs\n", stats.Packs.Repack)
|
||||||
Verboseff("to delete: %10d packs\n", stats.packs.remove)
|
printer.V("to delete: %10d packs\n", stats.Packs.Remove)
|
||||||
if stats.packs.unref > 0 {
|
if stats.Packs.Unref > 0 {
|
||||||
Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref)
|
printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// doPrune does the actual pruning:
|
func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) {
|
||||||
// - remove unreferenced packs first
|
|
||||||
// - repack given pack files while keeping the given blobs
|
|
||||||
// - rebuild the index while ignoring all files that will be deleted
|
|
||||||
// - delete the files
|
|
||||||
// plan.removePacks and plan.ignorePacks are modified in this function.
|
|
||||||
func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) {
|
|
||||||
if opts.DryRun {
|
|
||||||
if !gopts.JSON && gopts.verbosity >= 2 {
|
|
||||||
Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n")
|
|
||||||
if len(plan.removePacksFirst) > 0 {
|
|
||||||
Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst)
|
|
||||||
}
|
|
||||||
Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks)
|
|
||||||
Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks)
|
|
||||||
}
|
|
||||||
// Always quit here if DryRun was set!
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unreferenced packs can be safely deleted first
|
|
||||||
if len(plan.removePacksFirst) != 0 {
|
|
||||||
Verbosef("deleting unreferenced packs\n")
|
|
||||||
DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(plan.repackPacks) != 0 {
|
|
||||||
Verbosef("repacking packs\n")
|
|
||||||
bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked")
|
|
||||||
_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
|
|
||||||
bar.Done()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Also remove repacked packs
|
|
||||||
plan.removePacks.Merge(plan.repackPacks)
|
|
||||||
|
|
||||||
if len(plan.keepBlobs) != 0 {
|
|
||||||
Warnf("%v was not repacked\n\n"+
|
|
||||||
"Integrity check failed.\n"+
|
|
||||||
"Please report this error (along with the output of the 'prune' run) at\n"+
|
|
||||||
"https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs)
|
|
||||||
return errors.Fatal("internal error: blobs were not repacked")
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow GC of the blob set
|
|
||||||
plan.keepBlobs = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(plan.ignorePacks) == 0 {
|
|
||||||
plan.ignorePacks = plan.removePacks
|
|
||||||
} else {
|
|
||||||
plan.ignorePacks.Merge(plan.removePacks)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.unsafeRecovery {
|
|
||||||
Verbosef("deleting index files\n")
|
|
||||||
indexFiles := repo.Index().(*index.MasterIndex).IDs()
|
|
||||||
err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Fatalf("%s", err)
|
|
||||||
}
|
|
||||||
} else if len(plan.ignorePacks) != 0 {
|
|
||||||
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Fatalf("%s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(plan.removePacks) != 0 {
|
|
||||||
Verbosef("removing %d old packs\n", len(plan.removePacks))
|
|
||||||
DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.unsafeRecovery {
|
|
||||||
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Fatalf("%s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Verbosef("done\n")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error {
|
|
||||||
Verbosef("rebuilding index\n")
|
|
||||||
|
|
||||||
bar := newProgressMax(!gopts.Quiet, 0, "packs processed")
|
|
||||||
return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{
|
|
||||||
SaveProgress: bar,
|
|
||||||
DeleteProgress: func() *progress.Counter {
|
|
||||||
return newProgressMax(!gopts.Quiet, 0, "old indexes deleted")
|
|
||||||
},
|
|
||||||
DeleteReport: func(id restic.ID, _ error) {
|
|
||||||
if gopts.verbosity > 2 {
|
|
||||||
Verbosef("removed index %v\n", id.String())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
SkipDeletion: skipDeletion,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) {
|
|
||||||
var snapshotTrees restic.IDs
|
var snapshotTrees restic.IDs
|
||||||
Verbosef("loading all snapshots...\n")
|
printer.P("loading all snapshots...\n")
|
||||||
err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots,
|
err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots,
|
||||||
func(id restic.ID, sn *restic.Snapshot, err error) error {
|
func(id restic.ID, sn *restic.Snapshot, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -819,11 +275,12 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r
|
||||||
return nil, errors.Fatalf("failed loading snapshot: %v", err)
|
return nil, errors.Fatalf("failed loading snapshot: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees))
|
printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees))
|
||||||
|
|
||||||
usedBlobs = restic.NewCountedBlobSet()
|
usedBlobs = restic.NewCountedBlobSet()
|
||||||
|
|
||||||
bar := newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots")
|
bar := printer.NewCounter("snapshots")
|
||||||
|
bar.SetMax(uint64(len(snapshotTrees)))
|
||||||
defer bar.Done()
|
defer bar.Done()
|
||||||
|
|
||||||
err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar)
|
err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar)
|
||||||
|
|
|
@ -7,7 +7,9 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/backend"
|
"github.com/restic/restic/internal/backend"
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
|
func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
|
||||||
|
@ -16,7 +18,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
|
||||||
defer func() {
|
defer func() {
|
||||||
gopts.backendTestHook = oldHook
|
gopts.backendTestHook = oldHook
|
||||||
}()
|
}()
|
||||||
rtest.OK(t, runPrune(context.TODO(), opts, gopts))
|
rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runPrune(context.TODO(), opts, gopts, term)
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPrune(t *testing.T) {
|
func TestPrune(t *testing.T) {
|
||||||
|
@ -31,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
|
||||||
}
|
}
|
||||||
t.Run("0"+suffix, func(t *testing.T) {
|
t.Run("0"+suffix, func(t *testing.T) {
|
||||||
opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
|
opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
|
||||||
checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
|
checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery}
|
||||||
testPrune(t, opts, checkOpts)
|
testPrune(t, opts, checkOpts)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -84,7 +88,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||||
pruneOpts := PruneOptions{
|
pruneOpts := PruneOptions{
|
||||||
MaxUnused: "5%",
|
MaxUnused: "5%",
|
||||||
}
|
}
|
||||||
return runForget(context.TODO(), opts, pruneOpts, gopts, args)
|
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runForget(context.TODO(), opts, pruneOpts, gopts, term, args)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
rtest.OK(t, err)
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
@ -138,7 +144,9 @@ func TestPruneWithDamagedRepository(t *testing.T) {
|
||||||
env.gopts.backendTestHook = oldHook
|
env.gopts.backendTestHook = oldHook
|
||||||
}()
|
}()
|
||||||
// prune should fail
|
// prune should fail
|
||||||
rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
|
rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term)
|
||||||
|
}) == repository.ErrPacksMissing,
|
||||||
"prune should have reported index not complete error")
|
"prune should have reported index not complete error")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -218,7 +226,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o
|
||||||
testRunPrune(t, env.gopts, optionsPrune)
|
testRunPrune(t, env.gopts, optionsPrune)
|
||||||
testRunCheck(t, env.gopts)
|
testRunCheck(t, env.gopts)
|
||||||
} else {
|
} else {
|
||||||
rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
|
rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runPrune(context.TODO(), optionsPrune, env.gopts, term)
|
||||||
|
}) != nil,
|
||||||
"prune should have reported an error")
|
"prune should have reported an error")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -61,16 +61,22 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
|
||||||
// tree. If it is not referenced, we have a root tree.
|
// tree. If it is not referenced, we have a root tree.
|
||||||
trees := make(map[restic.ID]bool)
|
trees := make(map[restic.ID]bool)
|
||||||
|
|
||||||
repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
err = repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
||||||
if blob.Type == restic.TreeBlob {
|
if blob.Type == restic.TreeBlob {
|
||||||
trees[blob.Blob.ID] = false
|
trees[blob.Blob.ID] = false
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
Verbosef("load %d trees\n", len(trees))
|
Verbosef("load %d trees\n", len(trees))
|
||||||
bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
|
bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
|
||||||
for id := range trees {
|
for id := range trees {
|
||||||
tree, err := restic.LoadTree(ctx, repo, id)
|
tree, err := restic.LoadTree(ctx, repo, id)
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("unable to load tree %v: %v\n", id.Str(), err)
|
Warnf("unable to load tree %v: %v\n", id.Str(), err)
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -3,10 +3,8 @@ package main
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/index"
|
|
||||||
"github.com/restic/restic/internal/pack"
|
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
@ -25,7 +23,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||||
return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
|
term, cancel := setupTermstatus()
|
||||||
|
defer cancel()
|
||||||
|
return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -55,105 +55,22 @@ func init() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
|
func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
|
||||||
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
|
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer unlock()
|
defer unlock()
|
||||||
|
|
||||||
return rebuildIndex(ctx, opts, gopts, repo)
|
printer := newTerminalProgressPrinter(gopts.verbosity, term)
|
||||||
}
|
|
||||||
|
|
||||||
func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
|
err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{
|
||||||
var obsoleteIndexes restic.IDs
|
ReadAllPacks: opts.ReadAllPacks,
|
||||||
packSizeFromList := make(map[restic.ID]int64)
|
}, printer)
|
||||||
packSizeFromIndex := make(map[restic.ID]int64)
|
|
||||||
removePacks := restic.NewIDSet()
|
|
||||||
|
|
||||||
if opts.ReadAllPacks {
|
|
||||||
// get list of old index files but start with empty index
|
|
||||||
err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error {
|
|
||||||
obsoleteIndexes = append(obsoleteIndexes, id)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Verbosef("loading indexes...\n")
|
|
||||||
mi := index.NewMasterIndex()
|
|
||||||
err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
Warnf("removing invalid index %v: %v\n", id, err)
|
|
||||||
obsoleteIndexes = append(obsoleteIndexes, id)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
mi.Insert(idx)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = mi.MergeFinalIndexes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = repo.SetIndex(mi)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
packSizeFromIndex = pack.Size(ctx, repo.Index(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
Verbosef("getting pack files to read...\n")
|
|
||||||
err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
|
|
||||||
size, ok := packSizeFromIndex[id]
|
|
||||||
if !ok || size != packSize {
|
|
||||||
// Pack was not referenced in index or size does not match
|
|
||||||
packSizeFromList[id] = packSize
|
|
||||||
removePacks.Insert(id)
|
|
||||||
}
|
|
||||||
if !ok {
|
|
||||||
Warnf("adding pack file to index %v\n", id)
|
|
||||||
} else if size != packSize {
|
|
||||||
Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size)
|
|
||||||
}
|
|
||||||
delete(packSizeFromIndex, id)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for id := range packSizeFromIndex {
|
|
||||||
// forget pack files that are referenced in the index but do not exist
|
|
||||||
// when rebuilding the index
|
|
||||||
removePacks.Insert(id)
|
|
||||||
Warnf("removing not found pack file %v\n", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(packSizeFromList) > 0 {
|
|
||||||
Verbosef("reading pack files\n")
|
|
||||||
bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs")
|
|
||||||
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
|
|
||||||
bar.Done()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, id := range invalidFiles {
|
|
||||||
Verboseff("skipped incomplete pack file: %v\n", id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
Verbosef("done\n")
|
|
||||||
|
|
||||||
|
printer.P("done\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,12 +13,15 @@ import (
|
||||||
"github.com/restic/restic/internal/index"
|
"github.com/restic/restic/internal/index"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
|
func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
|
||||||
rtest.OK(t, withRestoreGlobalOptions(func() error {
|
rtest.OK(t, withRestoreGlobalOptions(func() error {
|
||||||
globalOptions.stdout = io.Discard
|
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)
|
globalOptions.stdout = io.Discard
|
||||||
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term)
|
||||||
|
})
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
|
||||||
rtest.SetupTarTestFixture(t, env.base, datafile)
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
err := withRestoreGlobalOptions(func() error {
|
err := withRestoreGlobalOptions(func() error {
|
||||||
globalOptions.stdout = io.Discard
|
|
||||||
|
|
||||||
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
|
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
|
||||||
return &appendOnlyBackend{r}, nil
|
return &appendOnlyBackend{r}, nil
|
||||||
}
|
}
|
||||||
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)
|
return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
globalOptions.stdout = io.Discard
|
||||||
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|
|
@ -58,14 +58,14 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T
|
||||||
}
|
}
|
||||||
defer unlock()
|
defer unlock()
|
||||||
|
|
||||||
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
printer := newTerminalProgressPrinter(gopts.verbosity, term)
|
||||||
|
|
||||||
|
bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
|
||||||
err = repo.LoadIndex(ctx, bar)
|
err = repo.LoadIndex(ctx, bar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("%s", err)
|
return errors.Fatalf("%s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
printer := newTerminalProgressPrinter(gopts.verbosity, term)
|
|
||||||
|
|
||||||
printer.P("saving backup copies of pack files to current folder")
|
printer.P("saving backup copies of pack files to current folder")
|
||||||
for id := range ids {
|
for id := range ids {
|
||||||
f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
|
f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
|
||||||
|
@ -82,6 +82,10 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
|
||||||
changedCount++
|
changedCount++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
Verbosef("\n")
|
Verbosef("\n")
|
||||||
if changedCount == 0 {
|
if changedCount == 0 {
|
||||||
|
|
|
@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
|
||||||
changedCount++
|
changedCount++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
Verbosef("\n")
|
Verbosef("\n")
|
||||||
if changedCount == 0 {
|
if changedCount == 0 {
|
||||||
|
|
|
@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
|
||||||
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
|
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
|
||||||
snapshots = append(snapshots, sn)
|
snapshots = append(snapshots, sn)
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -38,7 +38,7 @@ depending on what you are trying to calculate.
|
||||||
The modes are:
|
The modes are:
|
||||||
|
|
||||||
* restore-size: (default) Counts the size of the restored files.
|
* restore-size: (default) Counts the size of the restored files.
|
||||||
* files-by-contents: Counts total size of files, where a file is
|
* files-by-contents: Counts total size of unique files, where a file is
|
||||||
considered unique if it has unique contents.
|
considered unique if it has unique contents.
|
||||||
* raw-data: Counts the size of blobs in the repository, regardless of
|
* raw-data: Counts the size of blobs in the repository, regardless of
|
||||||
how many files reference them.
|
how many files reference them.
|
||||||
|
@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
|
||||||
return fmt.Errorf("error walking snapshot: %v", err)
|
return fmt.Errorf("error walking snapshot: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
if err != nil {
|
return ctx.Err()
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.countMode == countModeRawData {
|
if opts.countMode == countModeRawData {
|
||||||
|
@ -352,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error {
|
||||||
Warnf("File Type: %v\n%v\n", t, hist)
|
Warnf("File Type: %v\n%v\n", t, hist)
|
||||||
}
|
}
|
||||||
|
|
||||||
hist := statsDebugBlobs(ctx, repo)
|
hist, err := statsDebugBlobs(ctx, repo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
|
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
|
||||||
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
|
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
|
||||||
}
|
}
|
||||||
|
@ -370,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File
|
||||||
return hist, err
|
return hist, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
|
func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) {
|
||||||
var hist [restic.NumBlobTypes]*sizeHistogram
|
var hist [restic.NumBlobTypes]*sizeHistogram
|
||||||
for i := 0; i < len(hist); i++ {
|
for i := 0; i < len(hist); i++ {
|
||||||
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
|
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
repo.Index().Each(ctx, func(pb restic.PackedBlob) {
|
err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
|
||||||
hist[pb.Type].Add(uint64(pb.Length))
|
hist[pb.Type].Add(uint64(pb.Length))
|
||||||
})
|
})
|
||||||
|
|
||||||
return hist
|
return hist, err
|
||||||
}
|
}
|
||||||
|
|
||||||
type sizeClass struct {
|
type sizeClass struct {
|
||||||
|
|
|
@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
|
||||||
changeCnt++
|
changeCnt++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
if changeCnt == 0 {
|
if changeCnt == 0 {
|
||||||
Verbosef("no snapshots were modified\n")
|
Verbosef("no snapshots were modified\n")
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -1,41 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/restic/restic/internal/restic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeleteFiles deletes the given fileList of fileType in parallel
|
|
||||||
// it will print a warning if there is an error, but continue deleting the remaining files
|
|
||||||
func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) {
|
|
||||||
_ = deleteFiles(ctx, gopts, true, repo, fileList, fileType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteFilesChecked deletes the given fileList of fileType in parallel
|
|
||||||
// if an error occurs, it will cancel and return this error
|
|
||||||
func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
|
|
||||||
return deleteFiles(ctx, gopts, false, repo, fileList, fileType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteFiles deletes the given fileList of fileType in parallel
|
|
||||||
// if ignoreError=true, it will print a warning if there was an error, else it will abort.
|
|
||||||
func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
|
|
||||||
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted")
|
|
||||||
defer bar.Done()
|
|
||||||
|
|
||||||
return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
if !gopts.JSON {
|
|
||||||
Warnf("unable to remove %v/%v from the repository\n", fileType, id)
|
|
||||||
}
|
|
||||||
if !ignoreError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !gopts.JSON && gopts.verbosity > 2 {
|
|
||||||
Verbosef("removed %v/%v\n", fileType, id)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}, bar)
|
|
||||||
}
|
|
|
@ -2,6 +2,7 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"os"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
@ -14,17 +15,27 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter,
|
||||||
if !addHostShorthand {
|
if !addHostShorthand {
|
||||||
hostShorthand = ""
|
hostShorthand = ""
|
||||||
}
|
}
|
||||||
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)")
|
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)")
|
||||||
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
|
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
|
||||||
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")
|
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")
|
||||||
|
|
||||||
|
// set default based on env if set
|
||||||
|
if host := os.Getenv("RESTIC_HOST"); host != "" {
|
||||||
|
filt.Hosts = []string{host}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// initSingleSnapshotFilter is used for commands that work on a single snapshot
|
// initSingleSnapshotFilter is used for commands that work on a single snapshot
|
||||||
// MUST be combined with restic.FindFilteredSnapshot
|
// MUST be combined with restic.FindFilteredSnapshot
|
||||||
func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) {
|
func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) {
|
||||||
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)")
|
||||||
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
||||||
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
||||||
|
|
||||||
|
// set default based on env if set
|
||||||
|
if host := os.Getenv("RESTIC_HOST"); host != "" {
|
||||||
|
filt.Hosts = []string{host}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
|
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
|
||||||
|
|
|
@ -0,0 +1,61 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSnapshotFilter(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
name string
|
||||||
|
args []string
|
||||||
|
expected []string
|
||||||
|
env string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"no value",
|
||||||
|
[]string{},
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"args only",
|
||||||
|
[]string{"--host", "abc"},
|
||||||
|
[]string{"abc"},
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"env default",
|
||||||
|
[]string{},
|
||||||
|
[]string{"def"},
|
||||||
|
"def",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"both",
|
||||||
|
[]string{"--host", "abc"},
|
||||||
|
[]string{"abc"},
|
||||||
|
"def",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
t.Setenv("RESTIC_HOST", test.env)
|
||||||
|
|
||||||
|
for _, mode := range []bool{false, true} {
|
||||||
|
set := pflag.NewFlagSet("test", pflag.PanicOnError)
|
||||||
|
flt := &restic.SnapshotFilter{}
|
||||||
|
if mode {
|
||||||
|
initMultiSnapshotFilter(set, flt, false)
|
||||||
|
} else {
|
||||||
|
initSingleSnapshotFilter(set, flt)
|
||||||
|
}
|
||||||
|
err := set.Parse(test.args)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -43,7 +43,7 @@ import (
|
||||||
"golang.org/x/term"
|
"golang.org/x/term"
|
||||||
)
|
)
|
||||||
|
|
||||||
var version = "0.16.4-dev (compiled manually)"
|
const version = "0.16.4-dev (compiled manually)"
|
||||||
|
|
||||||
// TimeFormat is the format used for all timestamps printed by restic.
|
// TimeFormat is the format used for all timestamps printed by restic.
|
||||||
const TimeFormat = "2006-01-02 15:04:05"
|
const TimeFormat = "2006-01-02 15:04:05"
|
||||||
|
@ -96,9 +96,6 @@ var globalOptions = GlobalOptions{
|
||||||
stderr: os.Stderr,
|
stderr: os.Stderr,
|
||||||
}
|
}
|
||||||
|
|
||||||
var isReadingPassword bool
|
|
||||||
var internalGlobalCtx context.Context
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
backends := location.NewRegistry()
|
backends := location.NewRegistry()
|
||||||
backends.Register(azure.NewFactory())
|
backends.Register(azure.NewFactory())
|
||||||
|
@ -112,15 +109,6 @@ func init() {
|
||||||
backends.Register(swift.NewFactory())
|
backends.Register(swift.NewFactory())
|
||||||
globalOptions.backends = backends
|
globalOptions.backends = backends
|
||||||
|
|
||||||
var cancel context.CancelFunc
|
|
||||||
internalGlobalCtx, cancel = context.WithCancel(context.Background())
|
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
|
||||||
// Must be called before the unlock cleanup handler to ensure that the latter is
|
|
||||||
// not blocked due to limited number of backend connections, see #1434
|
|
||||||
cancel()
|
|
||||||
return code, nil
|
|
||||||
})
|
|
||||||
|
|
||||||
f := cmdRoot.PersistentFlags()
|
f := cmdRoot.PersistentFlags()
|
||||||
f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)")
|
f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)")
|
||||||
f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)")
|
f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)")
|
||||||
|
@ -165,8 +153,6 @@ func init() {
|
||||||
// parse target pack size from env, on error the default value will be used
|
// parse target pack size from env, on error the default value will be used
|
||||||
targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32)
|
targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32)
|
||||||
globalOptions.PackSize = uint(targetPackSize)
|
globalOptions.PackSize = uint(targetPackSize)
|
||||||
|
|
||||||
restoreTerminal()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func stdinIsTerminal() bool {
|
func stdinIsTerminal() bool {
|
||||||
|
@ -191,40 +177,6 @@ func stdoutTerminalWidth() int {
|
||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
// restoreTerminal installs a cleanup handler that restores the previous
|
|
||||||
// terminal state on exit. This handler is only intended to restore the
|
|
||||||
// terminal configuration if restic exits after receiving a signal. A regular
|
|
||||||
// program execution must revert changes to the terminal configuration itself.
|
|
||||||
// The terminal configuration is only restored while reading a password.
|
|
||||||
func restoreTerminal() {
|
|
||||||
if !term.IsTerminal(int(os.Stdout.Fd())) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fd := int(os.Stdout.Fd())
|
|
||||||
state, err := term.GetState(fd)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
|
||||||
// Restoring the terminal configuration while restic runs in the
|
|
||||||
// background, causes restic to get stopped on unix systems with
|
|
||||||
// a SIGTTOU signal. Thus only restore the terminal settings if
|
|
||||||
// they might have been modified, which is the case while reading
|
|
||||||
// a password.
|
|
||||||
if !isReadingPassword {
|
|
||||||
return code, nil
|
|
||||||
}
|
|
||||||
err := term.Restore(fd, state)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
|
|
||||||
}
|
|
||||||
return code, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearLine creates a platform dependent string to clear the current
|
// ClearLine creates a platform dependent string to clear the current
|
||||||
// line, so it can be overwritten.
|
// line, so it can be overwritten.
|
||||||
//
|
//
|
||||||
|
@ -333,24 +285,48 @@ func readPassword(in io.Reader) (password string, err error) {
|
||||||
|
|
||||||
// readPasswordTerminal reads the password from the given reader which must be a
|
// readPasswordTerminal reads the password from the given reader which must be a
|
||||||
// tty. Prompt is printed on the writer out before attempting to read the
|
// tty. Prompt is printed on the writer out before attempting to read the
|
||||||
// password.
|
// password. If the context is canceled, the function leaks the password reading
|
||||||
func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) {
|
// goroutine.
|
||||||
fmt.Fprint(out, prompt)
|
func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) {
|
||||||
isReadingPassword = true
|
fd := int(out.Fd())
|
||||||
buf, err := term.ReadPassword(int(in.Fd()))
|
state, err := term.GetState(fd)
|
||||||
isReadingPassword = false
|
if err != nil {
|
||||||
fmt.Fprintln(out)
|
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
var buf []byte
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(done)
|
||||||
|
fmt.Fprint(out, prompt)
|
||||||
|
buf, err = term.ReadPassword(int(in.Fd()))
|
||||||
|
fmt.Fprintln(out)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err := term.Restore(fd, state)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
|
||||||
|
}
|
||||||
|
return "", ctx.Err()
|
||||||
|
case <-done:
|
||||||
|
// clean shutdown, nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "ReadPassword")
|
return "", errors.Wrap(err, "ReadPassword")
|
||||||
}
|
}
|
||||||
|
|
||||||
password = string(buf)
|
return string(buf), nil
|
||||||
return password, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadPassword reads the password from a password file, the environment
|
// ReadPassword reads the password from a password file, the environment
|
||||||
// variable RESTIC_PASSWORD or prompts the user.
|
// variable RESTIC_PASSWORD or prompts the user. If the context is canceled,
|
||||||
func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
|
// the function leaks the password reading goroutine.
|
||||||
|
func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) {
|
||||||
if opts.password != "" {
|
if opts.password != "" {
|
||||||
return opts.password, nil
|
return opts.password, nil
|
||||||
}
|
}
|
||||||
|
@ -361,7 +337,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
|
||||||
)
|
)
|
||||||
|
|
||||||
if stdinIsTerminal() {
|
if stdinIsTerminal() {
|
||||||
password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
|
password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt)
|
||||||
} else {
|
} else {
|
||||||
password, err = readPassword(os.Stdin)
|
password, err = readPassword(os.Stdin)
|
||||||
Verbosef("reading repository password from stdin\n")
|
Verbosef("reading repository password from stdin\n")
|
||||||
|
@ -379,14 +355,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadPasswordTwice calls ReadPassword two times and returns an error when the
|
// ReadPasswordTwice calls ReadPassword two times and returns an error when the
|
||||||
// passwords don't match.
|
// passwords don't match. If the context is canceled, the function leaks the
|
||||||
func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
|
// password reading goroutine.
|
||||||
pw1, err := ReadPassword(gopts, prompt1)
|
func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
|
||||||
|
pw1, err := ReadPassword(ctx, gopts, prompt1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
if stdinIsTerminal() {
|
if stdinIsTerminal() {
|
||||||
pw2, err := ReadPassword(gopts, prompt2)
|
pw2, err := ReadPassword(ctx, gopts, prompt2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@ -469,7 +446,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
|
||||||
}
|
}
|
||||||
|
|
||||||
for ; passwordTriesLeft > 0; passwordTriesLeft-- {
|
for ; passwordTriesLeft > 0; passwordTriesLeft-- {
|
||||||
opts.password, err = ReadPassword(opts, "enter password for repository: ")
|
opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ")
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
if err != nil && passwordTriesLeft > 1 {
|
if err != nil && passwordTriesLeft > 1 {
|
||||||
opts.password = ""
|
opts.password = ""
|
||||||
fmt.Printf("%s. Try again\n", err)
|
fmt.Printf("%s. Try again\n", err)
|
||||||
|
@ -570,16 +550,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
|
||||||
return cfg, nil
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open the backend specified by a location config.
|
func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) {
|
||||||
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
|
|
||||||
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
|
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
|
||||||
loc, err := location.Parse(gopts.backends, s)
|
loc, err := location.Parse(gopts.backends, s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Fatalf("parsing repository location failed: %v", err)
|
return nil, errors.Fatalf("parsing repository location failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var be backend.Backend
|
|
||||||
|
|
||||||
cfg, err := parseConfig(loc, opts)
|
cfg, err := parseConfig(loc, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -599,7 +576,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
||||||
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
|
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
be, err = factory.Open(ctx, cfg, rt, lim)
|
var be backend.Backend
|
||||||
|
if create {
|
||||||
|
be, err = factory.Create(ctx, cfg, rt, lim)
|
||||||
|
} else {
|
||||||
|
be, err = factory.Open(ctx, cfg, rt, lim)
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
|
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
|
||||||
}
|
}
|
||||||
|
@ -615,6 +598,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return be, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open the backend specified by a location config.
|
||||||
|
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
|
||||||
|
|
||||||
|
be, err := innerOpen(ctx, s, gopts, opts, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// check if config is there
|
// check if config is there
|
||||||
fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
|
fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -630,31 +624,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
||||||
|
|
||||||
// Create the backend specified by URI.
|
// Create the backend specified by URI.
|
||||||
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
|
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
|
||||||
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
|
return innerOpen(ctx, s, gopts, opts, true)
|
||||||
loc, err := location.Parse(gopts.backends, s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg, err := parseConfig(loc, opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rt, err := backend.Transport(globalOptions.TransportOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
factory := gopts.backends.Lookup(loc.Scheme)
|
|
||||||
if factory == nil {
|
|
||||||
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
|
|
||||||
}
|
|
||||||
|
|
||||||
be, err := factory.Create(ctx, cfg, rt, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return logger.New(sema.NewBackend(be)), nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,23 +15,28 @@ import (
|
||||||
"github.com/pkg/profile"
|
"github.com/pkg/profile"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
type ProfileOptions struct {
|
||||||
listenProfile string
|
listen string
|
||||||
memProfilePath string
|
memPath string
|
||||||
cpuProfilePath string
|
cpuPath string
|
||||||
traceProfilePath string
|
tracePath string
|
||||||
blockProfilePath string
|
blockPath string
|
||||||
insecure bool
|
insecure bool
|
||||||
)
|
}
|
||||||
|
|
||||||
|
var profileOpts ProfileOptions
|
||||||
|
var prof interface {
|
||||||
|
Stop()
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
f := cmdRoot.PersistentFlags()
|
f := cmdRoot.PersistentFlags()
|
||||||
f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
|
f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling")
|
||||||
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
|
f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`")
|
||||||
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
|
f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`")
|
||||||
f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`")
|
f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`")
|
||||||
f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`")
|
f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`")
|
||||||
f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
|
f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings")
|
||||||
}
|
}
|
||||||
|
|
||||||
type fakeTestingTB struct{}
|
type fakeTestingTB struct{}
|
||||||
|
@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func runDebug() error {
|
func runDebug() error {
|
||||||
if listenProfile != "" {
|
if profileOpts.listen != "" {
|
||||||
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile)
|
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen)
|
||||||
go func() {
|
go func() {
|
||||||
err := http.ListenAndServe(listenProfile, nil)
|
err := http.ListenAndServe(profileOpts.listen, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err)
|
fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err)
|
||||||
}
|
}
|
||||||
|
@ -52,16 +57,16 @@ func runDebug() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
profilesEnabled := 0
|
profilesEnabled := 0
|
||||||
if memProfilePath != "" {
|
if profileOpts.memPath != "" {
|
||||||
profilesEnabled++
|
profilesEnabled++
|
||||||
}
|
}
|
||||||
if cpuProfilePath != "" {
|
if profileOpts.cpuPath != "" {
|
||||||
profilesEnabled++
|
profilesEnabled++
|
||||||
}
|
}
|
||||||
if traceProfilePath != "" {
|
if profileOpts.tracePath != "" {
|
||||||
profilesEnabled++
|
profilesEnabled++
|
||||||
}
|
}
|
||||||
if blockProfilePath != "" {
|
if profileOpts.blockPath != "" {
|
||||||
profilesEnabled++
|
profilesEnabled++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,30 +74,25 @@ func runDebug() error {
|
||||||
return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time")
|
return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
var prof interface {
|
if profileOpts.memPath != "" {
|
||||||
Stop()
|
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath))
|
||||||
|
} else if profileOpts.cpuPath != "" {
|
||||||
|
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath))
|
||||||
|
} else if profileOpts.tracePath != "" {
|
||||||
|
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath))
|
||||||
|
} else if profileOpts.blockPath != "" {
|
||||||
|
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath))
|
||||||
}
|
}
|
||||||
|
|
||||||
if memProfilePath != "" {
|
if profileOpts.insecure {
|
||||||
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath))
|
|
||||||
} else if cpuProfilePath != "" {
|
|
||||||
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
|
|
||||||
} else if traceProfilePath != "" {
|
|
||||||
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath))
|
|
||||||
} else if blockProfilePath != "" {
|
|
||||||
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath))
|
|
||||||
}
|
|
||||||
|
|
||||||
if prof != nil {
|
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
|
||||||
prof.Stop()
|
|
||||||
return code, nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if insecure {
|
|
||||||
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
|
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func stopDebug() {
|
||||||
|
if prof != nil {
|
||||||
|
prof.Stop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -5,3 +5,6 @@ package main
|
||||||
|
|
||||||
// runDebug is a noop without the debug tag.
|
// runDebug is a noop without the debug tag.
|
||||||
func runDebug() error { return nil }
|
func runDebug() error { return nil }
|
||||||
|
|
||||||
|
// stopDebug is a noop without the debug tag.
|
||||||
|
func stopDebug() {}
|
||||||
|
|
|
@ -252,11 +252,11 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
|
||||||
|
|
||||||
rtest.OK(t, r.LoadIndex(ctx, nil))
|
rtest.OK(t, r.LoadIndex(ctx, nil))
|
||||||
treePacks := restic.NewIDSet()
|
treePacks := restic.NewIDSet()
|
||||||
r.Index().Each(ctx, func(pb restic.PackedBlob) {
|
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
|
||||||
if pb.Type == restic.TreeBlob {
|
if pb.Type == restic.TreeBlob {
|
||||||
treePacks.Insert(pb.PackID)
|
treePacks.Insert(pb.PackID)
|
||||||
}
|
}
|
||||||
})
|
}))
|
||||||
|
|
||||||
return treePacks
|
return treePacks
|
||||||
}
|
}
|
||||||
|
@ -280,11 +280,11 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem
|
||||||
rtest.OK(t, r.LoadIndex(ctx, nil))
|
rtest.OK(t, r.LoadIndex(ctx, nil))
|
||||||
|
|
||||||
treePacks := restic.NewIDSet()
|
treePacks := restic.NewIDSet()
|
||||||
r.Index().Each(ctx, func(pb restic.PackedBlob) {
|
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
|
||||||
if pb.Type == restic.TreeBlob {
|
if pb.Type == restic.TreeBlob {
|
||||||
treePacks.Insert(pb.PackID)
|
treePacks.Insert(pb.PackID)
|
||||||
}
|
}
|
||||||
})
|
}))
|
||||||
|
|
||||||
// remove all packs containing data blobs
|
// remove all packs containing data blobs
|
||||||
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
|
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
|
||||||
|
|
|
@ -12,6 +12,7 @@ import (
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCheckRestoreNoLock(t *testing.T) {
|
func TestCheckRestoreNoLock(t *testing.T) {
|
||||||
|
@ -88,8 +89,12 @@ func TestListOnce(t *testing.T) {
|
||||||
testRunPrune(t, env.gopts, pruneOpts)
|
testRunPrune(t, env.gopts, pruneOpts)
|
||||||
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
|
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
|
||||||
|
|
||||||
rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts))
|
rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts))
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term)
|
||||||
|
}))
|
||||||
|
rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term)
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
type writeToOnly struct {
|
type writeToOnly struct {
|
||||||
|
|
|
@ -21,18 +21,11 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo
|
||||||
Verbosef("%s", msg)
|
Verbosef("%s", msg)
|
||||||
}
|
}
|
||||||
}, Warnf)
|
}, Warnf)
|
||||||
|
|
||||||
unlock = lock.Unlock
|
|
||||||
// make sure that a repository is unlocked properly and after cancel() was
|
|
||||||
// called by the cleanup handler in global.go
|
|
||||||
AddCleanupHandler(func(code int) (int, error) {
|
|
||||||
lock.Unlock()
|
|
||||||
return code, nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unlock = lock.Unlock
|
||||||
} else {
|
} else {
|
||||||
repo.SetDryRun()
|
repo.SetDryRun()
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,6 +3,7 @@ package main
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
@ -24,6 +25,8 @@ func init() {
|
||||||
_, _ = maxprocs.Set()
|
_, _ = maxprocs.Set()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var ErrOK = errors.New("ok")
|
||||||
|
|
||||||
// cmdRoot is the base command when no other command has been specified.
|
// cmdRoot is the base command when no other command has been specified.
|
||||||
var cmdRoot = &cobra.Command{
|
var cmdRoot = &cobra.Command{
|
||||||
Use: "restic",
|
Use: "restic",
|
||||||
|
@ -74,6 +77,9 @@ The full documentation can be found at https://restic.readthedocs.io/ .
|
||||||
// enabled)
|
// enabled)
|
||||||
return runDebug()
|
return runDebug()
|
||||||
},
|
},
|
||||||
|
PersistentPostRun: func(_ *cobra.Command, _ []string) {
|
||||||
|
stopDebug()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Distinguish commands that need the password from those that work without,
|
// Distinguish commands that need the password from those that work without,
|
||||||
|
@ -88,8 +94,6 @@ func needsPassword(cmd string) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var logBuffer = bytes.NewBuffer(nil)
|
|
||||||
|
|
||||||
func tweakGoGC() {
|
func tweakGoGC() {
|
||||||
// lower GOGC from 100 to 50, unless it was manually overwritten by the user
|
// lower GOGC from 100 to 50, unless it was manually overwritten by the user
|
||||||
oldValue := godebug.SetGCPercent(50)
|
oldValue := godebug.SetGCPercent(50)
|
||||||
|
@ -102,6 +106,7 @@ func main() {
|
||||||
tweakGoGC()
|
tweakGoGC()
|
||||||
// install custom global logger into a buffer, if an error occurs
|
// install custom global logger into a buffer, if an error occurs
|
||||||
// we can show the logs
|
// we can show the logs
|
||||||
|
logBuffer := bytes.NewBuffer(nil)
|
||||||
log.SetOutput(logBuffer)
|
log.SetOutput(logBuffer)
|
||||||
|
|
||||||
err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) {
|
err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) {
|
||||||
|
@ -115,7 +120,16 @@ func main() {
|
||||||
debug.Log("main %#v", os.Args)
|
debug.Log("main %#v", os.Args)
|
||||||
debug.Log("restic %s compiled with %v on %v/%v",
|
debug.Log("restic %s compiled with %v on %v/%v",
|
||||||
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
||||||
err = cmdRoot.ExecuteContext(internalGlobalCtx)
|
|
||||||
|
ctx := createGlobalContext()
|
||||||
|
err = cmdRoot.ExecuteContext(ctx)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
err = ctx.Err()
|
||||||
|
} else if err == ErrOK {
|
||||||
|
// ErrOK overwrites context cancelation errors
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case restic.IsAlreadyLocked(err):
|
case restic.IsAlreadyLocked(err):
|
||||||
|
@ -137,11 +151,13 @@ func main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
var exitCode int
|
var exitCode int
|
||||||
switch err {
|
switch {
|
||||||
case nil:
|
case err == nil:
|
||||||
exitCode = 0
|
exitCode = 0
|
||||||
case ErrInvalidSourceData:
|
case err == ErrInvalidSourceData:
|
||||||
exitCode = 3
|
exitCode = 3
|
||||||
|
case errors.Is(err, context.Canceled):
|
||||||
|
exitCode = 130
|
||||||
default:
|
default:
|
||||||
exitCode = 1
|
exitCode = 1
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
|
@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo
|
||||||
opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
|
opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
|
||||||
}
|
}
|
||||||
|
|
||||||
func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
|
func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
|
||||||
if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
|
if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
|
||||||
return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
|
return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
|
||||||
}
|
}
|
||||||
|
@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
|
||||||
return GlobalOptions{}, false, err
|
return GlobalOptions{}, false, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ")
|
dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return GlobalOptions{}, false, err
|
return GlobalOptions{}, false, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
|
||||||
|
|
||||||
// Test all valid cases
|
// Test all valid cases
|
||||||
for _, testCase := range validSecondaryRepoTestCases {
|
for _, testCase := range validSecondaryRepoTestCases {
|
||||||
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
|
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
|
||||||
rtest.OK(t, err)
|
rtest.OK(t, err)
|
||||||
rtest.Equals(t, DstGOpts, testCase.DstGOpts)
|
rtest.Equals(t, DstGOpts, testCase.DstGOpts)
|
||||||
rtest.Equals(t, isFromRepo, testCase.FromRepo)
|
rtest.Equals(t, isFromRepo, testCase.FromRepo)
|
||||||
|
@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
|
||||||
|
|
||||||
// Test all invalid cases
|
// Test all invalid cases
|
||||||
for _, testCase := range invalidSecondaryRepoTestCases {
|
for _, testCase := range invalidSecondaryRepoTestCases {
|
||||||
_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
|
_, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
|
||||||
rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
|
rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -303,7 +303,7 @@ func generateFiles() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var versionPattern = `var version = ".*"`
|
var versionPattern = `const version = ".*"`
|
||||||
|
|
||||||
const versionCodeFile = "cmd/restic/global.go"
|
const versionCodeFile = "cmd/restic/global.go"
|
||||||
|
|
||||||
|
@ -313,7 +313,7 @@ func updateVersion() {
|
||||||
die("unable to write version to file: %v", err)
|
die("unable to write version to file: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
newVersion := fmt.Sprintf("var version = %q", opts.Version)
|
newVersion := fmt.Sprintf("const version = %q", opts.Version)
|
||||||
replace(versionCodeFile, versionPattern, newVersion)
|
replace(versionCodeFile, versionPattern, newVersion)
|
||||||
|
|
||||||
if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 {
|
if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 {
|
||||||
|
@ -323,7 +323,7 @@ func updateVersion() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateVersionDev() {
|
func updateVersionDev() {
|
||||||
newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version)
|
newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version)
|
||||||
replace(versionCodeFile, versionPattern, newVersion)
|
replace(versionCodeFile, versionPattern, newVersion)
|
||||||
|
|
||||||
msg("committing cmd/restic/global.go with dev version")
|
msg("committing cmd/restic/global.go with dev version")
|
||||||
|
|
|
@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
return futureNodeResult{err: ctx.Err()}
|
||||||
}
|
}
|
||||||
return futureNodeResult{err: errors.Errorf("no result")}
|
return futureNodeResult{err: errors.Errorf("no result")}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1430,7 +1430,7 @@ func TestArchiverSnapshot(t *testing.T) {
|
||||||
}
|
}
|
||||||
TestEnsureSnapshot(t, repo, snapshotID, want)
|
TestEnsureSnapshot(t, repo, snapshotID, want)
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
|
|
||||||
// check that the snapshot contains the targets with absolute paths
|
// check that the snapshot contains the targets with absolute paths
|
||||||
for i, target := range sn.Paths {
|
for i, target := range sn.Paths {
|
||||||
|
@ -1590,7 +1590,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
|
||||||
}
|
}
|
||||||
TestEnsureSnapshot(t, repo, snapshotID, want)
|
TestEnsureSnapshot(t, repo, snapshotID, want)
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1794,7 +1794,7 @@ func TestArchiverParent(t *testing.T) {
|
||||||
t.Logf("second backup saved as %v", secondSnapshotID.Str())
|
t.Logf("second backup saved as %v", secondSnapshotID.Str())
|
||||||
t.Logf("testfs: %v", testFS)
|
t.Logf("testfs: %v", testFS)
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1927,7 +1927,7 @@ func TestArchiverErrorReporting(t *testing.T) {
|
||||||
}
|
}
|
||||||
TestEnsureSnapshot(t, repo, snapshotID, want)
|
TestEnsureSnapshot(t, repo, snapshotID, want)
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2288,7 +2288,7 @@ func TestMetadataChanged(t *testing.T) {
|
||||||
// make sure the content matches
|
// make sure the content matches
|
||||||
TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile))
|
TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile))
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRacyFileSwap(t *testing.T) {
|
func TestRacyFileSwap(t *testing.T) {
|
||||||
|
|
|
@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
|
||||||
// return the error if it wasn't ignored
|
// return the error if it wasn't ignored
|
||||||
if fnr.err != nil {
|
if fnr.err != nil {
|
||||||
debug.Log("err for %v: %v", fnr.snPath, fnr.err)
|
debug.Log("err for %v: %v", fnr.snPath, fnr.err)
|
||||||
|
if fnr.err == context.Canceled {
|
||||||
|
return nil, stats, fnr.err
|
||||||
|
}
|
||||||
|
|
||||||
fnr.err = s.errFn(fnr.target, fnr.err)
|
fnr.err = s.errFn(fnr.target, fnr.err)
|
||||||
if fnr.err == nil {
|
if fnr.err == nil {
|
||||||
// ignore error
|
// ignore error
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -41,7 +42,7 @@ func NewFactory() location.Factory {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
var errNotFound = errors.New("not found")
|
var errNotFound = fmt.Errorf("not found")
|
||||||
|
|
||||||
const connectionCount = 2
|
const connectionCount = 2
|
||||||
|
|
||||||
|
|
|
@ -17,13 +17,17 @@ type Config struct {
|
||||||
Command string `option:"command" help:"specify command to create sftp connection"`
|
Command string `option:"command" help:"specify command to create sftp connection"`
|
||||||
Args string `option:"args" help:"specify arguments for ssh"`
|
Args string `option:"args" help:"specify arguments for ssh"`
|
||||||
|
|
||||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||||
|
MaxConcurrentRequestsPerFile int `option:"max_concurrent_requests_per_file" help:"sets the maximum concurrent requests allowed for a single file (default: 64)"`
|
||||||
|
MaxPacket int `option:"max_packet" help:"sets the maximum size of the payload, measured in bytes (default: 32768)"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConfig returns a new config with default options applied.
|
// NewConfig returns a new config with default options applied.
|
||||||
func NewConfig() Config {
|
func NewConfig() Config {
|
||||||
return Config{
|
return Config{
|
||||||
Connections: 5,
|
Connections: 5,
|
||||||
|
MaxConcurrentRequestsPerFile: 64,
|
||||||
|
MaxPacket: 32768,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -102,7 +102,10 @@ func startClient(cfg Config) (*SFTP, error) {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// open the SFTP session
|
// open the SFTP session
|
||||||
client, err := sftp.NewClientPipe(rd, wr)
|
client, err := sftp.NewClientPipe(rd, wr,
|
||||||
|
sftp.MaxConcurrentRequestsPerFile(cfg.MaxConcurrentRequestsPerFile),
|
||||||
|
sftp.MaxPacketUnchecked(cfg.MaxPacket),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
|
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -165,7 +165,8 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil {
|
// ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently
|
||||||
|
if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -106,9 +106,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType {
|
func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) {
|
||||||
packs := make(map[restic.ID]restic.BlobType)
|
packs := make(map[restic.ID]restic.BlobType)
|
||||||
idx.Each(ctx, func(pb restic.PackedBlob) {
|
err := idx.Each(ctx, func(pb restic.PackedBlob) {
|
||||||
tpe, exists := packs[pb.PackID]
|
tpe, exists := packs[pb.PackID]
|
||||||
if exists {
|
if exists {
|
||||||
if pb.Type != tpe {
|
if pb.Type != tpe {
|
||||||
|
@ -119,7 +119,7 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID
|
||||||
}
|
}
|
||||||
packs[pb.PackID] = tpe
|
packs[pb.PackID] = tpe
|
||||||
})
|
})
|
||||||
return packs
|
return packs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadIndex loads all index files.
|
// LoadIndex loads all index files.
|
||||||
|
@ -169,7 +169,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
|
||||||
|
|
||||||
debug.Log("process blobs")
|
debug.Log("process blobs")
|
||||||
cnt := 0
|
cnt := 0
|
||||||
index.Each(ctx, func(blob restic.PackedBlob) {
|
err = index.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
cnt++
|
cnt++
|
||||||
|
|
||||||
if _, ok := packToIndex[blob.PackID]; !ok {
|
if _, ok := packToIndex[blob.PackID]; !ok {
|
||||||
|
@ -179,7 +179,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
|
||||||
})
|
})
|
||||||
|
|
||||||
debug.Log("%d blobs processed", cnt)
|
debug.Log("%d blobs processed", cnt)
|
||||||
return nil
|
return err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
|
@ -193,8 +193,14 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
|
||||||
}
|
}
|
||||||
|
|
||||||
// compute pack size using index entries
|
// compute pack size using index entries
|
||||||
c.packs = pack.Size(ctx, c.masterIndex, false)
|
c.packs, err = pack.Size(ctx, c.masterIndex, false)
|
||||||
packTypes := computePackTypes(ctx, c.masterIndex)
|
if err != nil {
|
||||||
|
return hints, append(errs, err)
|
||||||
|
}
|
||||||
|
packTypes, err := computePackTypes(ctx, c.masterIndex)
|
||||||
|
if err != nil {
|
||||||
|
return hints, append(errs, err)
|
||||||
|
}
|
||||||
|
|
||||||
debug.Log("checking for duplicate packs")
|
debug.Log("checking for duplicate packs")
|
||||||
for packID := range c.packs {
|
for packID := range c.packs {
|
||||||
|
@ -484,7 +490,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnusedBlobs returns all blobs that have never been referenced.
|
// UnusedBlobs returns all blobs that have never been referenced.
|
||||||
func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
|
func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) {
|
||||||
if !c.trackUnused {
|
if !c.trackUnused {
|
||||||
panic("only works when tracking blob references")
|
panic("only works when tracking blob references")
|
||||||
}
|
}
|
||||||
|
@ -495,7 +501,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
c.repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
||||||
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
|
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
|
||||||
if !c.blobRefs.M.Has(h) {
|
if !c.blobRefs.M.Has(h) {
|
||||||
debug.Log("blob %v not referenced", h)
|
debug.Log("blob %v not referenced", h)
|
||||||
|
@ -503,7 +509,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
return blobs
|
return blobs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CountPacks returns the number of packs in the repository.
|
// CountPacks returns the number of packs in the repository.
|
||||||
|
|
|
@ -180,7 +180,8 @@ func TestUnreferencedBlobs(t *testing.T) {
|
||||||
test.OKs(t, checkPacks(chkr))
|
test.OKs(t, checkPacks(chkr))
|
||||||
test.OKs(t, checkStruct(chkr))
|
test.OKs(t, checkStruct(chkr))
|
||||||
|
|
||||||
blobs := chkr.UnusedBlobs(context.TODO())
|
blobs, err := chkr.UnusedBlobs(context.TODO())
|
||||||
|
test.OK(t, err)
|
||||||
sort.Sort(blobs)
|
sort.Sort(blobs)
|
||||||
|
|
||||||
test.Equals(t, unusedBlobsBySnapshot, blobs)
|
test.Equals(t, unusedBlobsBySnapshot, blobs)
|
||||||
|
|
|
@ -8,7 +8,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestCheckRepo runs the checker on repo.
|
// TestCheckRepo runs the checker on repo.
|
||||||
func TestCheckRepo(t testing.TB, repo restic.Repository) {
|
func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) {
|
||||||
chkr := New(repo, true)
|
chkr := New(repo, true)
|
||||||
|
|
||||||
hints, errs := chkr.LoadIndex(context.TODO(), nil)
|
hints, errs := chkr.LoadIndex(context.TODO(), nil)
|
||||||
|
@ -33,18 +33,23 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// structure
|
if !skipStructure {
|
||||||
errChan = make(chan error)
|
// structure
|
||||||
go chkr.Structure(context.TODO(), nil, errChan)
|
errChan = make(chan error)
|
||||||
|
go chkr.Structure(context.TODO(), nil, errChan)
|
||||||
|
|
||||||
for err := range errChan {
|
for err := range errChan {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// unused blobs
|
// unused blobs
|
||||||
blobs := chkr.UnusedBlobs(context.TODO())
|
blobs, err := chkr.UnusedBlobs(context.TODO())
|
||||||
if len(blobs) > 0 {
|
if err != nil {
|
||||||
t.Errorf("unused blobs found: %v", blobs)
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if len(blobs) > 0 {
|
||||||
|
t.Errorf("unused blobs found: %v", blobs)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// read data
|
// read data
|
||||||
|
|
|
@ -190,7 +190,7 @@ func (e *vssError) Error() string {
|
||||||
return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult)
|
return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult)
|
||||||
}
|
}
|
||||||
|
|
||||||
// VssError encapsulates errors returned from calling VSS api.
|
// vssTextError encapsulates errors returned from calling VSS api.
|
||||||
type vssTextError struct {
|
type vssTextError struct {
|
||||||
text string
|
text string
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -69,11 +70,9 @@ func (idx *Index) addToPacks(id restic.ID) int {
|
||||||
return len(idx.packs) - 1
|
return len(idx.packs) - 1
|
||||||
}
|
}
|
||||||
|
|
||||||
const maxuint32 = 1<<32 - 1
|
|
||||||
|
|
||||||
func (idx *Index) store(packIndex int, blob restic.Blob) {
|
func (idx *Index) store(packIndex int, blob restic.Blob) {
|
||||||
// assert that offset and length fit into uint32!
|
// assert that offset and length fit into uint32!
|
||||||
if blob.Offset > maxuint32 || blob.Length > maxuint32 || blob.UncompressedLength > maxuint32 {
|
if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 {
|
||||||
panic("offset or length does not fit in uint32. You have packs > 4GB!")
|
panic("offset or length does not fit in uint32. You have packs > 4GB!")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -219,7 +218,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
|
||||||
|
|
||||||
// Each passes all blobs known to the index to the callback fn. This blocks any
|
// Each passes all blobs known to the index to the callback fn. This blocks any
|
||||||
// modification of the index.
|
// modification of the index.
|
||||||
func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) {
|
func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
|
||||||
idx.m.Lock()
|
idx.m.Lock()
|
||||||
defer idx.m.Unlock()
|
defer idx.m.Unlock()
|
||||||
|
|
||||||
|
@ -233,6 +232,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) {
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
type EachByPackResult struct {
|
type EachByPackResult struct {
|
||||||
|
|
|
@ -339,7 +339,7 @@ func TestIndexUnserialize(t *testing.T) {
|
||||||
|
|
||||||
rtest.Equals(t, oldIdx, idx.Supersedes())
|
rtest.Equals(t, oldIdx, idx.Supersedes())
|
||||||
|
|
||||||
blobs := listPack(idx, exampleLookupTest.packID)
|
blobs := listPack(t, idx, exampleLookupTest.packID)
|
||||||
if len(blobs) != len(exampleLookupTest.blobs) {
|
if len(blobs) != len(exampleLookupTest.blobs) {
|
||||||
t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
|
t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
|
||||||
}
|
}
|
||||||
|
@ -356,12 +356,12 @@ func TestIndexUnserialize(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func listPack(idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
|
func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
|
||||||
idx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
if pb.PackID.Equal(id) {
|
if pb.PackID.Equal(id) {
|
||||||
pbs = append(pbs, pb)
|
pbs = append(pbs, pb)
|
||||||
}
|
}
|
||||||
})
|
}))
|
||||||
return pbs
|
return pbs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -223,13 +223,16 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index {
|
||||||
|
|
||||||
// Each runs fn on all blobs known to the index. When the context is cancelled,
|
// Each runs fn on all blobs known to the index. When the context is cancelled,
|
||||||
// the index iteration return immediately. This blocks any modification of the index.
|
// the index iteration return immediately. This blocks any modification of the index.
|
||||||
func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) {
|
func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
|
||||||
mi.idxMutex.RLock()
|
mi.idxMutex.RLock()
|
||||||
defer mi.idxMutex.RUnlock()
|
defer mi.idxMutex.RUnlock()
|
||||||
|
|
||||||
for _, idx := range mi.idx {
|
for _, idx := range mi.idx {
|
||||||
idx.Each(ctx, fn)
|
if err := idx.Each(ctx, fn); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MergeFinalIndexes merges all final indexes together.
|
// MergeFinalIndexes merges all final indexes together.
|
||||||
|
@ -320,6 +323,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude
|
||||||
newIndex = NewIndex()
|
newIndex = NewIndex()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if wgCtx.Err() != nil {
|
||||||
|
return wgCtx.Err()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err := newIndex.AddToSupersedes(extraObsolete...)
|
err := newIndex.AddToSupersedes(extraObsolete...)
|
||||||
|
@ -426,10 +432,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan
|
||||||
defer close(out)
|
defer close(out)
|
||||||
// only resort a part of the index to keep the memory overhead bounded
|
// only resort a part of the index to keep the memory overhead bounded
|
||||||
for i := byte(0); i < 16; i++ {
|
for i := byte(0); i < 16; i++ {
|
||||||
if ctx.Err() != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
packBlob := make(map[restic.ID][]restic.Blob)
|
packBlob := make(map[restic.ID][]restic.Blob)
|
||||||
for pack := range packs {
|
for pack := range packs {
|
||||||
if pack[0]&0xf == i {
|
if pack[0]&0xf == i {
|
||||||
|
@ -439,11 +441,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan
|
||||||
if len(packBlob) == 0 {
|
if len(packBlob) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
mi.Each(ctx, func(pb restic.PackedBlob) {
|
err := mi.Each(ctx, func(pb restic.PackedBlob) {
|
||||||
if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i {
|
if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i {
|
||||||
packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob)
|
packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// pass on packs
|
// pass on packs
|
||||||
for packID, pbs := range packBlob {
|
for packID, pbs := range packBlob {
|
||||||
|
|
|
@ -166,9 +166,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
|
||||||
rtest.Equals(t, 1, idxCount)
|
rtest.Equals(t, 1, idxCount)
|
||||||
|
|
||||||
blobCount := 0
|
blobCount := 0
|
||||||
mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
blobCount++
|
blobCount++
|
||||||
})
|
}))
|
||||||
rtest.Equals(t, 2, blobCount)
|
rtest.Equals(t, 2, blobCount)
|
||||||
|
|
||||||
blobs := mIdx.Lookup(bhInIdx1)
|
blobs := mIdx.Lookup(bhInIdx1)
|
||||||
|
@ -198,9 +198,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
|
||||||
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
|
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
|
||||||
|
|
||||||
blobCount = 0
|
blobCount = 0
|
||||||
mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
blobCount++
|
blobCount++
|
||||||
})
|
}))
|
||||||
rtest.Equals(t, 2, blobCount)
|
rtest.Equals(t, 2, blobCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -319,9 +319,9 @@ func BenchmarkMasterIndexEach(b *testing.B) {
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
entries := 0
|
entries := 0
|
||||||
mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
entries++
|
entries++
|
||||||
})
|
}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int {
|
||||||
// If onlyHdr is set to true, only the size of the header is returned
|
// If onlyHdr is set to true, only the size of the header is returned
|
||||||
// Note that this function only gives correct sizes, if there are no
|
// Note that this function only gives correct sizes, if there are no
|
||||||
// duplicates in the index.
|
// duplicates in the index.
|
||||||
func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.ID]int64 {
|
func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) {
|
||||||
packSize := make(map[restic.ID]int64)
|
packSize := make(map[restic.ID]int64)
|
||||||
|
|
||||||
mi.Each(ctx, func(blob restic.PackedBlob) {
|
err := mi.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
size, ok := packSize[blob.PackID]
|
size, ok := packSize[blob.PackID]
|
||||||
if !ok {
|
if !ok {
|
||||||
size = headerSize
|
size = headerSize
|
||||||
|
@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I
|
||||||
packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob))
|
packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob))
|
||||||
})
|
})
|
||||||
|
|
||||||
return packSize
|
return packSize, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,638 @@
|
||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/errors"
|
||||||
|
"github.com/restic/restic/internal/index"
|
||||||
|
"github.com/restic/restic/internal/pack"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/ui/progress"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrIndexIncomplete = errors.Fatal("index is not complete")
|
||||||
|
var ErrPacksMissing = errors.Fatal("packs from index missing in repo")
|
||||||
|
var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index")
|
||||||
|
|
||||||
|
// PruneOptions collects all options for the cleanup command.
|
||||||
|
type PruneOptions struct {
|
||||||
|
DryRun bool
|
||||||
|
UnsafeRecovery bool
|
||||||
|
|
||||||
|
MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused
|
||||||
|
MaxRepackBytes uint64
|
||||||
|
|
||||||
|
RepackCachableOnly bool
|
||||||
|
RepackSmall bool
|
||||||
|
RepackUncompressed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type PruneStats struct {
|
||||||
|
Blobs struct {
|
||||||
|
Used uint
|
||||||
|
Duplicate uint
|
||||||
|
Unused uint
|
||||||
|
Remove uint
|
||||||
|
Repack uint
|
||||||
|
Repackrm uint
|
||||||
|
}
|
||||||
|
Size struct {
|
||||||
|
Used uint64
|
||||||
|
Duplicate uint64
|
||||||
|
Unused uint64
|
||||||
|
Remove uint64
|
||||||
|
Repack uint64
|
||||||
|
Repackrm uint64
|
||||||
|
Unref uint64
|
||||||
|
Uncompressed uint64
|
||||||
|
}
|
||||||
|
Packs struct {
|
||||||
|
Used uint
|
||||||
|
Unused uint
|
||||||
|
PartlyUsed uint
|
||||||
|
Unref uint
|
||||||
|
Keep uint
|
||||||
|
Repack uint
|
||||||
|
Remove uint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type PrunePlan struct {
|
||||||
|
removePacksFirst restic.IDSet // packs to remove first (unreferenced packs)
|
||||||
|
repackPacks restic.IDSet // packs to repack
|
||||||
|
keepBlobs restic.CountedBlobSet // blobs to keep during repacking
|
||||||
|
removePacks restic.IDSet // packs to remove
|
||||||
|
ignorePacks restic.IDSet // packs to ignore when rebuilding the index
|
||||||
|
|
||||||
|
repo restic.Repository
|
||||||
|
stats PruneStats
|
||||||
|
opts PruneOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
type packInfo struct {
|
||||||
|
usedBlobs uint
|
||||||
|
unusedBlobs uint
|
||||||
|
usedSize uint64
|
||||||
|
unusedSize uint64
|
||||||
|
tpe restic.BlobType
|
||||||
|
uncompressed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type packInfoWithID struct {
|
||||||
|
ID restic.ID
|
||||||
|
packInfo
|
||||||
|
mustCompress bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// PlanPrune selects which files to rewrite and which to delete and which blobs to keep.
|
||||||
|
// Also some summary statistics are returned.
|
||||||
|
func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) {
|
||||||
|
var stats PruneStats
|
||||||
|
|
||||||
|
if opts.UnsafeRecovery {
|
||||||
|
// prevent repacking data to make sure users cannot get stuck.
|
||||||
|
opts.MaxRepackBytes = 0
|
||||||
|
}
|
||||||
|
if repo.Connections() < 2 {
|
||||||
|
return nil, fmt.Errorf("prune requires a backend connection limit of at least two")
|
||||||
|
}
|
||||||
|
if repo.Config().Version < 2 && opts.RepackUncompressed {
|
||||||
|
return nil, fmt.Errorf("compression requires at least repository format version 2")
|
||||||
|
}
|
||||||
|
|
||||||
|
usedBlobs, err := getUsedBlobs(ctx, repo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
printer.P("searching used packs...\n")
|
||||||
|
keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
printer.P("collecting packs for deletion and repacking\n")
|
||||||
|
plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(plan.repackPacks) != 0 {
|
||||||
|
blobCount := keepBlobs.Len()
|
||||||
|
// when repacking, we do not want to keep blobs which are
|
||||||
|
// already contained in kept packs, so delete them from keepBlobs
|
||||||
|
err := repo.Index().Each(ctx, func(blob restic.PackedBlob) {
|
||||||
|
if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
keepBlobs.Delete(blob.BlobHandle)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if keepBlobs.Len() < blobCount/2 {
|
||||||
|
// replace with copy to shrink map to necessary size if there's a chance to benefit
|
||||||
|
keepBlobs = keepBlobs.Copy()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// keepBlobs is only needed if packs are repacked
|
||||||
|
keepBlobs = nil
|
||||||
|
}
|
||||||
|
plan.keepBlobs = keepBlobs
|
||||||
|
|
||||||
|
plan.repo = repo
|
||||||
|
plan.stats = stats
|
||||||
|
plan.opts = opts
|
||||||
|
|
||||||
|
return &plan, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) {
|
||||||
|
// iterate over all blobs in index to find out which blobs are duplicates
|
||||||
|
// The counter in usedBlobs describes how many instances of the blob exist in the repository index
|
||||||
|
// Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist
|
||||||
|
err := idx.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
|
bh := blob.BlobHandle
|
||||||
|
count, ok := usedBlobs[bh]
|
||||||
|
if ok {
|
||||||
|
if count < math.MaxUint8 {
|
||||||
|
// don't overflow, but saturate count at 255
|
||||||
|
// this can lead to a non-optimal pack selection, but won't cause
|
||||||
|
// problems otherwise
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
|
||||||
|
usedBlobs[bh] = count
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if all used blobs have been found in index
|
||||||
|
missingBlobs := restic.NewBlobSet()
|
||||||
|
for bh, count := range usedBlobs {
|
||||||
|
if count == 0 {
|
||||||
|
// blob does not exist in any pack files
|
||||||
|
missingBlobs.Insert(bh)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(missingBlobs) != 0 {
|
||||||
|
printer.E("%v not found in the index\n\n"+
|
||||||
|
"Integrity check failed: Data seems to be missing.\n"+
|
||||||
|
"Will not start prune to prevent (additional) data loss!\n"+
|
||||||
|
"Please report this error (along with the output of the 'prune' run) at\n"+
|
||||||
|
"https://github.com/restic/restic/issues/new/choose\n", missingBlobs)
|
||||||
|
return nil, nil, ErrIndexIncomplete
|
||||||
|
}
|
||||||
|
|
||||||
|
indexPack := make(map[restic.ID]packInfo)
|
||||||
|
|
||||||
|
// save computed pack header size
|
||||||
|
sz, err := pack.Size(ctx, idx, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
for pid, hdrSize := range sz {
|
||||||
|
// initialize tpe with NumBlobTypes to indicate it's not set
|
||||||
|
indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)}
|
||||||
|
}
|
||||||
|
|
||||||
|
hasDuplicates := false
|
||||||
|
// iterate over all blobs in index to generate packInfo
|
||||||
|
err = idx.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
|
ip := indexPack[blob.PackID]
|
||||||
|
|
||||||
|
// Set blob type if not yet set
|
||||||
|
if ip.tpe == restic.NumBlobTypes {
|
||||||
|
ip.tpe = blob.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
// mark mixed packs with "Invalid blob type"
|
||||||
|
if ip.tpe != blob.Type {
|
||||||
|
ip.tpe = restic.InvalidBlob
|
||||||
|
}
|
||||||
|
|
||||||
|
bh := blob.BlobHandle
|
||||||
|
size := uint64(blob.Length)
|
||||||
|
dupCount := usedBlobs[bh]
|
||||||
|
switch {
|
||||||
|
case dupCount >= 2:
|
||||||
|
hasDuplicates = true
|
||||||
|
// mark as unused for now, we will later on select one copy
|
||||||
|
ip.unusedSize += size
|
||||||
|
ip.unusedBlobs++
|
||||||
|
|
||||||
|
// count as duplicate, will later on change one copy to be counted as used
|
||||||
|
stats.Size.Duplicate += size
|
||||||
|
stats.Blobs.Duplicate++
|
||||||
|
case dupCount == 1: // used blob, not duplicate
|
||||||
|
ip.usedSize += size
|
||||||
|
ip.usedBlobs++
|
||||||
|
|
||||||
|
stats.Size.Used += size
|
||||||
|
stats.Blobs.Used++
|
||||||
|
default: // unused blob
|
||||||
|
ip.unusedSize += size
|
||||||
|
ip.unusedBlobs++
|
||||||
|
|
||||||
|
stats.Size.Unused += size
|
||||||
|
stats.Blobs.Unused++
|
||||||
|
}
|
||||||
|
if !blob.IsCompressed() {
|
||||||
|
ip.uncompressed = true
|
||||||
|
}
|
||||||
|
// update indexPack
|
||||||
|
indexPack[blob.PackID] = ip
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// if duplicate blobs exist, those will be set to either "used" or "unused":
|
||||||
|
// - mark only one occurrence of duplicate blobs as used
|
||||||
|
// - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used"
|
||||||
|
// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
|
||||||
|
if hasDuplicates {
|
||||||
|
// iterate again over all blobs in index (this is pretty cheap, all in-mem)
|
||||||
|
err = idx.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
|
bh := blob.BlobHandle
|
||||||
|
count, ok := usedBlobs[bh]
|
||||||
|
// skip non-duplicate, aka. normal blobs
|
||||||
|
// count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining
|
||||||
|
if !ok || count == 1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ip := indexPack[blob.PackID]
|
||||||
|
size := uint64(blob.Length)
|
||||||
|
switch {
|
||||||
|
case ip.usedBlobs > 0, count == 0:
|
||||||
|
// other used blobs in pack or "last" occurrence -> transition to used
|
||||||
|
ip.usedSize += size
|
||||||
|
ip.usedBlobs++
|
||||||
|
ip.unusedSize -= size
|
||||||
|
ip.unusedBlobs--
|
||||||
|
// same for the global statistics
|
||||||
|
stats.Size.Used += size
|
||||||
|
stats.Blobs.Used++
|
||||||
|
stats.Size.Duplicate -= size
|
||||||
|
stats.Blobs.Duplicate--
|
||||||
|
// let other occurrences remain marked as unused
|
||||||
|
usedBlobs[bh] = 1
|
||||||
|
default:
|
||||||
|
// remain unused and decrease counter
|
||||||
|
count--
|
||||||
|
if count == 1 {
|
||||||
|
// setting count to 1 would lead to forgetting that this blob had duplicates
|
||||||
|
// thus use the special value zero. This will select the last instance of the blob for keeping.
|
||||||
|
count = 0
|
||||||
|
}
|
||||||
|
usedBlobs[bh] = count
|
||||||
|
}
|
||||||
|
// update indexPack
|
||||||
|
indexPack[blob.PackID] = ip
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity check. If no duplicates exist, all blobs have value 1. After handling
|
||||||
|
// duplicates, this also applies to duplicates.
|
||||||
|
for _, count := range usedBlobs {
|
||||||
|
if count != 1 {
|
||||||
|
panic("internal error during blob selection")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return usedBlobs, indexPack, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) {
|
||||||
|
removePacksFirst := restic.NewIDSet()
|
||||||
|
removePacks := restic.NewIDSet()
|
||||||
|
repackPacks := restic.NewIDSet()
|
||||||
|
|
||||||
|
var repackCandidates []packInfoWithID
|
||||||
|
var repackSmallCandidates []packInfoWithID
|
||||||
|
repoVersion := repo.Config().Version
|
||||||
|
// only repack very small files by default
|
||||||
|
targetPackSize := repo.PackSize() / 25
|
||||||
|
if opts.RepackSmall {
|
||||||
|
// consider files with at least 80% of the target size as large enough
|
||||||
|
targetPackSize = repo.PackSize() / 5 * 4
|
||||||
|
}
|
||||||
|
|
||||||
|
// loop over all packs and decide what to do
|
||||||
|
bar := printer.NewCounter("packs processed")
|
||||||
|
bar.SetMax(uint64(len(indexPack)))
|
||||||
|
err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
|
||||||
|
p, ok := indexPack[id]
|
||||||
|
if !ok {
|
||||||
|
// Pack was not referenced in index and is not used => immediately remove!
|
||||||
|
printer.V("will remove pack %v as it is unused and not indexed\n", id.Str())
|
||||||
|
removePacksFirst.Insert(id)
|
||||||
|
stats.Size.Unref += uint64(packSize)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 {
|
||||||
|
// Pack size does not fit and pack is needed => error
|
||||||
|
// If the pack is not needed, this is no error, the pack can
|
||||||
|
// and will be simply removed, see below.
|
||||||
|
printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
|
||||||
|
id.Str(), p.unusedSize+p.usedSize, packSize)
|
||||||
|
return ErrSizeNotMatching
|
||||||
|
}
|
||||||
|
|
||||||
|
// statistics
|
||||||
|
switch {
|
||||||
|
case p.usedBlobs == 0:
|
||||||
|
stats.Packs.Unused++
|
||||||
|
case p.unusedBlobs == 0:
|
||||||
|
stats.Packs.Used++
|
||||||
|
default:
|
||||||
|
stats.Packs.PartlyUsed++
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.uncompressed {
|
||||||
|
stats.Size.Uncompressed += p.unusedSize + p.usedSize
|
||||||
|
}
|
||||||
|
mustCompress := false
|
||||||
|
if repoVersion >= 2 {
|
||||||
|
// repo v2: always repack tree blobs if uncompressed
|
||||||
|
// compress data blobs if requested
|
||||||
|
mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
|
||||||
|
}
|
||||||
|
|
||||||
|
// decide what to do
|
||||||
|
switch {
|
||||||
|
case p.usedBlobs == 0:
|
||||||
|
// All blobs in pack are no longer used => remove pack!
|
||||||
|
removePacks.Insert(id)
|
||||||
|
stats.Blobs.Remove += p.unusedBlobs
|
||||||
|
stats.Size.Remove += p.unusedSize
|
||||||
|
|
||||||
|
case opts.RepackCachableOnly && p.tpe == restic.DataBlob:
|
||||||
|
// if this is a data pack and --repack-cacheable-only is set => keep pack!
|
||||||
|
stats.Packs.Keep++
|
||||||
|
|
||||||
|
case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
|
||||||
|
if packSize >= int64(targetPackSize) {
|
||||||
|
// All blobs in pack are used and not mixed => keep pack!
|
||||||
|
stats.Packs.Keep++
|
||||||
|
} else {
|
||||||
|
repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
// all other packs are candidates for repacking
|
||||||
|
repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(indexPack, id)
|
||||||
|
bar.Add(1)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
bar.Done()
|
||||||
|
if err != nil {
|
||||||
|
return PrunePlan{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point indexPacks contains only missing packs!
|
||||||
|
|
||||||
|
// missing packs that are not needed can be ignored
|
||||||
|
ignorePacks := restic.NewIDSet()
|
||||||
|
for id, p := range indexPack {
|
||||||
|
if p.usedBlobs == 0 {
|
||||||
|
ignorePacks.Insert(id)
|
||||||
|
stats.Blobs.Remove += p.unusedBlobs
|
||||||
|
stats.Size.Remove += p.unusedSize
|
||||||
|
delete(indexPack, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(indexPack) != 0 {
|
||||||
|
printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack))
|
||||||
|
for id := range indexPack {
|
||||||
|
printer.E(" %v\n", id)
|
||||||
|
}
|
||||||
|
return PrunePlan{}, ErrPacksMissing
|
||||||
|
}
|
||||||
|
if len(ignorePacks) != 0 {
|
||||||
|
printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n")
|
||||||
|
for id := range ignorePacks {
|
||||||
|
printer.E("will forget missing pack file %v\n", id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(repackSmallCandidates) < 10 {
|
||||||
|
// too few small files to be worth the trouble, this also prevents endlessly repacking
|
||||||
|
// if there is just a single pack file below the target size
|
||||||
|
stats.Packs.Keep += uint(len(repackSmallCandidates))
|
||||||
|
} else {
|
||||||
|
repackCandidates = append(repackCandidates, repackSmallCandidates...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort repackCandidates such that packs with highest ratio unused/used space are picked first.
|
||||||
|
// This is equivalent to sorting by unused / total space.
|
||||||
|
// Instead of unused[i] / used[i] > unused[j] / used[j] we use
|
||||||
|
// unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64
|
||||||
|
// Moreover packs containing trees and too small packs are sorted to the beginning
|
||||||
|
sort.Slice(repackCandidates, func(i, j int) bool {
|
||||||
|
pi := repackCandidates[i].packInfo
|
||||||
|
pj := repackCandidates[j].packInfo
|
||||||
|
switch {
|
||||||
|
case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob:
|
||||||
|
return true
|
||||||
|
case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob:
|
||||||
|
return false
|
||||||
|
case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize):
|
||||||
|
return true
|
||||||
|
case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize):
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize
|
||||||
|
})
|
||||||
|
|
||||||
|
repack := func(id restic.ID, p packInfo) {
|
||||||
|
repackPacks.Insert(id)
|
||||||
|
stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs
|
||||||
|
stats.Size.Repack += p.unusedSize + p.usedSize
|
||||||
|
stats.Blobs.Repackrm += p.unusedBlobs
|
||||||
|
stats.Size.Repackrm += p.unusedSize
|
||||||
|
if p.uncompressed {
|
||||||
|
stats.Size.Uncompressed -= p.unusedSize + p.usedSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculate limit for number of unused bytes in the repo after repacking
|
||||||
|
maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used)
|
||||||
|
|
||||||
|
for _, p := range repackCandidates {
|
||||||
|
reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter)
|
||||||
|
reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes
|
||||||
|
packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case reachedRepackSize:
|
||||||
|
stats.Packs.Keep++
|
||||||
|
|
||||||
|
case p.tpe != restic.DataBlob, p.mustCompress:
|
||||||
|
// repacking non-data packs / uncompressed-trees is only limited by repackSize
|
||||||
|
repack(p.ID, p.packInfo)
|
||||||
|
|
||||||
|
case reachedUnusedSizeAfter && packIsLargeEnough:
|
||||||
|
// for all other packs stop repacking if tolerated unused size is reached.
|
||||||
|
stats.Packs.Keep++
|
||||||
|
|
||||||
|
default:
|
||||||
|
repack(p.ID, p.packInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Packs.Unref = uint(len(removePacksFirst))
|
||||||
|
stats.Packs.Repack = uint(len(repackPacks))
|
||||||
|
stats.Packs.Remove = uint(len(removePacks))
|
||||||
|
|
||||||
|
if repo.Config().Version < 2 {
|
||||||
|
// compression not supported for repository format version 1
|
||||||
|
stats.Size.Uncompressed = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return PrunePlan{removePacksFirst: removePacksFirst,
|
||||||
|
removePacks: removePacks,
|
||||||
|
repackPacks: repackPacks,
|
||||||
|
ignorePacks: ignorePacks,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (plan *PrunePlan) Stats() PruneStats {
|
||||||
|
return plan.stats
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute does the actual pruning:
|
||||||
|
// - remove unreferenced packs first
|
||||||
|
// - repack given pack files while keeping the given blobs
|
||||||
|
// - rebuild the index while ignoring all files that will be deleted
|
||||||
|
// - delete the files
|
||||||
|
// plan.removePacks and plan.ignorePacks are modified in this function.
|
||||||
|
func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (err error) {
|
||||||
|
if plan.opts.DryRun {
|
||||||
|
printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n")
|
||||||
|
if len(plan.removePacksFirst) > 0 {
|
||||||
|
printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst)
|
||||||
|
}
|
||||||
|
printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks)
|
||||||
|
printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks)
|
||||||
|
// Always quit here if DryRun was set!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
repo := plan.repo
|
||||||
|
// make sure the plan can only be used once
|
||||||
|
plan.repo = nil
|
||||||
|
|
||||||
|
// unreferenced packs can be safely deleted first
|
||||||
|
if len(plan.removePacksFirst) != 0 {
|
||||||
|
printer.P("deleting unreferenced packs\n")
|
||||||
|
_ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer)
|
||||||
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(plan.repackPacks) != 0 {
|
||||||
|
printer.P("repacking packs\n")
|
||||||
|
bar := printer.NewCounter("packs repacked")
|
||||||
|
bar.SetMax(uint64(len(plan.repackPacks)))
|
||||||
|
_, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
|
||||||
|
bar.Done()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also remove repacked packs
|
||||||
|
plan.removePacks.Merge(plan.repackPacks)
|
||||||
|
|
||||||
|
if len(plan.keepBlobs) != 0 {
|
||||||
|
printer.E("%v was not repacked\n\n"+
|
||||||
|
"Integrity check failed.\n"+
|
||||||
|
"Please report this error (along with the output of the 'prune' run) at\n"+
|
||||||
|
"https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs)
|
||||||
|
return errors.Fatal("internal error: blobs were not repacked")
|
||||||
|
}
|
||||||
|
|
||||||
|
// allow GC of the blob set
|
||||||
|
plan.keepBlobs = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(plan.ignorePacks) == 0 {
|
||||||
|
plan.ignorePacks = plan.removePacks
|
||||||
|
} else {
|
||||||
|
plan.ignorePacks.Merge(plan.removePacks)
|
||||||
|
}
|
||||||
|
|
||||||
|
if plan.opts.UnsafeRecovery {
|
||||||
|
printer.P("deleting index files\n")
|
||||||
|
indexFiles := repo.Index().(*index.MasterIndex).IDs()
|
||||||
|
err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("%s", err)
|
||||||
|
}
|
||||||
|
} else if len(plan.ignorePacks) != 0 {
|
||||||
|
err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("%s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(plan.removePacks) != 0 {
|
||||||
|
printer.P("removing %d old packs\n", len(plan.removePacks))
|
||||||
|
_ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer)
|
||||||
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if plan.opts.UnsafeRecovery {
|
||||||
|
err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("%s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// drop outdated in-memory index
|
||||||
|
repo.ClearIndex()
|
||||||
|
|
||||||
|
printer.P("done\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteFiles deletes the given fileList of fileType in parallel
|
||||||
|
// if ignoreError=true, it will print a warning if there was an error, else it will abort.
|
||||||
|
func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error {
|
||||||
|
bar := printer.NewCounter("files deleted")
|
||||||
|
defer bar.Done()
|
||||||
|
|
||||||
|
return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
printer.E("unable to remove %v/%v from the repository\n", fileType, id)
|
||||||
|
if !ignoreError {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
printer.VV("removed %v/%v\n", fileType, id)
|
||||||
|
return nil
|
||||||
|
}, bar)
|
||||||
|
}
|
|
@ -0,0 +1,105 @@
|
||||||
|
package repository_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/checker"
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/progress"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) {
|
||||||
|
repo := repository.TestRepository(t).(*repository.Repository)
|
||||||
|
createRandomBlobs(t, repo, 4, 0.5, true)
|
||||||
|
createRandomBlobs(t, repo, 5, 0.5, true)
|
||||||
|
keep, _ := selectBlobs(t, repo, 0.5)
|
||||||
|
|
||||||
|
var wg errgroup.Group
|
||||||
|
repo.StartPackUploader(context.TODO(), &wg)
|
||||||
|
// duplicate a few blobs to exercise those code paths
|
||||||
|
for blob := range keep {
|
||||||
|
buf, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
_, _, _, err = repo.SaveBlob(context.TODO(), blob.Type, buf, blob.ID, true)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
}
|
||||||
|
rtest.OK(t, repo.Flush(context.TODO()))
|
||||||
|
|
||||||
|
plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) {
|
||||||
|
return restic.NewCountedBlobSet(keep.List()...), nil
|
||||||
|
}, &progress.NoopPrinter{})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{}))
|
||||||
|
|
||||||
|
repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository)
|
||||||
|
checker.TestCheckRepo(t, repo, true)
|
||||||
|
|
||||||
|
if errOnUnused {
|
||||||
|
existing := listBlobs(repo)
|
||||||
|
rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrune(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
name string
|
||||||
|
opts repository.PruneOptions
|
||||||
|
errOnUnused bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "0",
|
||||||
|
opts: repository.PruneOptions{
|
||||||
|
MaxRepackBytes: math.MaxUint64,
|
||||||
|
MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 },
|
||||||
|
},
|
||||||
|
errOnUnused: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "50",
|
||||||
|
opts: repository.PruneOptions{
|
||||||
|
MaxRepackBytes: math.MaxUint64,
|
||||||
|
MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unlimited",
|
||||||
|
opts: repository.PruneOptions{
|
||||||
|
MaxRepackBytes: math.MaxUint64,
|
||||||
|
MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cachableonly",
|
||||||
|
opts: repository.PruneOptions{
|
||||||
|
MaxRepackBytes: math.MaxUint64,
|
||||||
|
MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 },
|
||||||
|
RepackCachableOnly: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "small",
|
||||||
|
opts: repository.PruneOptions{
|
||||||
|
MaxRepackBytes: math.MaxUint64,
|
||||||
|
MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 },
|
||||||
|
RepackSmall: true,
|
||||||
|
},
|
||||||
|
errOnUnused: true,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
testPrune(t, test.opts, test.errOnUnused)
|
||||||
|
})
|
||||||
|
t.Run(test.name+"-recovery", func(t *testing.T) {
|
||||||
|
opts := test.opts
|
||||||
|
opts.UnsafeRecovery = true
|
||||||
|
// unsafeNoSpaceRecovery does not repack partially used pack files
|
||||||
|
testPrune(t, opts, false)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
|
||||||
return wgCtx.Err()
|
return wgCtx.Err()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return wgCtx.Err()
|
||||||
})
|
})
|
||||||
|
|
||||||
worker := func() error {
|
worker := func() error {
|
||||||
|
|
|
@ -18,7 +18,7 @@ func randomSize(min, max int) int {
|
||||||
return rand.Intn(max-min) + min
|
return rand.Intn(max-min) + min
|
||||||
}
|
}
|
||||||
|
|
||||||
func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) {
|
func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32, smallBlobs bool) {
|
||||||
var wg errgroup.Group
|
var wg errgroup.Group
|
||||||
repo.StartPackUploader(context.TODO(), &wg)
|
repo.StartPackUploader(context.TODO(), &wg)
|
||||||
|
|
||||||
|
@ -30,7 +30,11 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl
|
||||||
|
|
||||||
if rand.Float32() < pData {
|
if rand.Float32() < pData {
|
||||||
tpe = restic.DataBlob
|
tpe = restic.DataBlob
|
||||||
length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
|
if smallBlobs {
|
||||||
|
length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB of data
|
||||||
|
} else {
|
||||||
|
length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
tpe = restic.TreeBlob
|
tpe = restic.TreeBlob
|
||||||
length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
|
length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
|
||||||
|
@ -121,8 +125,12 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2
|
||||||
}
|
}
|
||||||
|
|
||||||
func listPacks(t *testing.T, repo restic.Lister) restic.IDSet {
|
func listPacks(t *testing.T, repo restic.Lister) restic.IDSet {
|
||||||
|
return listFiles(t, repo, restic.PackFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet {
|
||||||
list := restic.NewIDSet()
|
list := restic.NewIDSet()
|
||||||
err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
|
err := repo.List(context.TODO(), tpe, func(id restic.ID, size int64) error {
|
||||||
list.Insert(id)
|
list.Insert(id)
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
@ -166,12 +174,6 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func flush(t *testing.T, repo restic.Repository) {
|
|
||||||
if err := repo.Flush(context.TODO()); err != nil {
|
|
||||||
t.Fatalf("repo.SaveIndex() %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func rebuildIndex(t *testing.T, repo restic.Repository) {
|
func rebuildIndex(t *testing.T, repo restic.Repository) {
|
||||||
err := repo.SetIndex(index.NewMasterIndex())
|
err := repo.SetIndex(index.NewMasterIndex())
|
||||||
rtest.OK(t, err)
|
rtest.OK(t, err)
|
||||||
|
@ -219,7 +221,9 @@ func testRepack(t *testing.T, version uint) {
|
||||||
rand.Seed(seed)
|
rand.Seed(seed)
|
||||||
t.Logf("rand seed is %v", seed)
|
t.Logf("rand seed is %v", seed)
|
||||||
|
|
||||||
createRandomBlobs(t, repo, 100, 0.7)
|
// add a small amount of blobs twice to create multiple pack files
|
||||||
|
createRandomBlobs(t, repo, 10, 0.7, false)
|
||||||
|
createRandomBlobs(t, repo, 10, 0.7, false)
|
||||||
|
|
||||||
packsBefore := listPacks(t, repo)
|
packsBefore := listPacks(t, repo)
|
||||||
|
|
||||||
|
@ -233,8 +237,6 @@ func testRepack(t *testing.T, version uint) {
|
||||||
packsBefore, packsAfter)
|
packsBefore, packsAfter)
|
||||||
}
|
}
|
||||||
|
|
||||||
flush(t, repo)
|
|
||||||
|
|
||||||
removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2)
|
removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2)
|
||||||
|
|
||||||
removePacks := findPacksForBlobs(t, repo, removeBlobs)
|
removePacks := findPacksForBlobs(t, repo, removeBlobs)
|
||||||
|
@ -302,8 +304,9 @@ func testRepackCopy(t *testing.T, version uint) {
|
||||||
rand.Seed(seed)
|
rand.Seed(seed)
|
||||||
t.Logf("rand seed is %v", seed)
|
t.Logf("rand seed is %v", seed)
|
||||||
|
|
||||||
createRandomBlobs(t, repo, 100, 0.7)
|
// add a small amount of blobs twice to create multiple pack files
|
||||||
flush(t, repo)
|
createRandomBlobs(t, repo, 10, 0.7, false)
|
||||||
|
createRandomBlobs(t, repo, 10, 0.7, false)
|
||||||
|
|
||||||
_, keepBlobs := selectBlobs(t, repo, 0.2)
|
_, keepBlobs := selectBlobs(t, repo, 0.2)
|
||||||
copyPacks := findPacksForBlobs(t, repo, keepBlobs)
|
copyPacks := findPacksForBlobs(t, repo, keepBlobs)
|
||||||
|
@ -343,7 +346,7 @@ func testRepackWrongBlob(t *testing.T, version uint) {
|
||||||
rand.Seed(seed)
|
rand.Seed(seed)
|
||||||
t.Logf("rand seed is %v", seed)
|
t.Logf("rand seed is %v", seed)
|
||||||
|
|
||||||
createRandomBlobs(t, repo, 5, 0.7)
|
createRandomBlobs(t, repo, 5, 0.7, false)
|
||||||
createRandomWrongBlob(t, repo)
|
createRandomWrongBlob(t, repo)
|
||||||
|
|
||||||
// just keep all blobs, but also rewrite every pack
|
// just keep all blobs, but also rewrite every pack
|
||||||
|
|
|
@ -0,0 +1,132 @@
|
||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/index"
|
||||||
|
"github.com/restic/restic/internal/pack"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/ui/progress"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RepairIndexOptions struct {
|
||||||
|
ReadAllPacks bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error {
|
||||||
|
var obsoleteIndexes restic.IDs
|
||||||
|
packSizeFromList := make(map[restic.ID]int64)
|
||||||
|
packSizeFromIndex := make(map[restic.ID]int64)
|
||||||
|
removePacks := restic.NewIDSet()
|
||||||
|
|
||||||
|
if opts.ReadAllPacks {
|
||||||
|
// get list of old index files but start with empty index
|
||||||
|
err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error {
|
||||||
|
obsoleteIndexes = append(obsoleteIndexes, id)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
printer.P("loading indexes...\n")
|
||||||
|
mi := index.NewMasterIndex()
|
||||||
|
err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
printer.E("removing invalid index %v: %v\n", id, err)
|
||||||
|
obsoleteIndexes = append(obsoleteIndexes, id)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mi.Insert(idx)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = mi.MergeFinalIndexes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = repo.SetIndex(mi)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
printer.P("getting pack files to read...\n")
|
||||||
|
err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
|
||||||
|
size, ok := packSizeFromIndex[id]
|
||||||
|
if !ok || size != packSize {
|
||||||
|
// Pack was not referenced in index or size does not match
|
||||||
|
packSizeFromList[id] = packSize
|
||||||
|
removePacks.Insert(id)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
printer.E("adding pack file to index %v\n", id)
|
||||||
|
} else if size != packSize {
|
||||||
|
printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size)
|
||||||
|
}
|
||||||
|
delete(packSizeFromIndex, id)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for id := range packSizeFromIndex {
|
||||||
|
// forget pack files that are referenced in the index but do not exist
|
||||||
|
// when rebuilding the index
|
||||||
|
removePacks.Insert(id)
|
||||||
|
printer.E("removing not found pack file %v\n", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(packSizeFromList) > 0 {
|
||||||
|
printer.P("reading pack files\n")
|
||||||
|
bar := printer.NewCounter("packs")
|
||||||
|
bar.SetMax(uint64(len(packSizeFromList)))
|
||||||
|
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
|
||||||
|
bar.Done()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, id := range invalidFiles {
|
||||||
|
printer.V("skipped incomplete pack file: %v\n", id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// drop outdated in-memory index
|
||||||
|
repo.ClearIndex()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error {
|
||||||
|
printer.P("rebuilding index\n")
|
||||||
|
|
||||||
|
bar := printer.NewCounter("packs processed")
|
||||||
|
return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{
|
||||||
|
SaveProgress: bar,
|
||||||
|
DeleteProgress: func() *progress.Counter {
|
||||||
|
return printer.NewCounter("old indexes deleted")
|
||||||
|
},
|
||||||
|
DeleteReport: func(id restic.ID, err error) {
|
||||||
|
if err != nil {
|
||||||
|
printer.VV("failed to remove index %v: %v\n", id.String(), err)
|
||||||
|
} else {
|
||||||
|
printer.VV("removed index %v\n", id.String())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
SkipDeletion: skipDeletion,
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,79 @@
|
||||||
|
package repository_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/backend"
|
||||||
|
"github.com/restic/restic/internal/checker"
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/progress"
|
||||||
|
)
|
||||||
|
|
||||||
|
func listIndex(t *testing.T, repo restic.Lister) restic.IDSet {
|
||||||
|
return listFiles(t, repo, restic.IndexFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) {
|
||||||
|
repo := repository.TestRepository(t).(*repository.Repository)
|
||||||
|
createRandomBlobs(t, repo, 4, 0.5, true)
|
||||||
|
createRandomBlobs(t, repo, 5, 0.5, true)
|
||||||
|
indexes := listIndex(t, repo)
|
||||||
|
t.Logf("old indexes %v", indexes)
|
||||||
|
|
||||||
|
damage(t, repo)
|
||||||
|
|
||||||
|
repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository)
|
||||||
|
rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{
|
||||||
|
ReadAllPacks: readAllPacks,
|
||||||
|
}, &progress.NoopPrinter{}))
|
||||||
|
|
||||||
|
newIndexes := listIndex(t, repo)
|
||||||
|
old := indexes.Intersect(newIndexes)
|
||||||
|
rtest.Assert(t, len(old) == 0, "expected old indexes to be removed, found %v", old)
|
||||||
|
|
||||||
|
checker.TestCheckRepo(t, repo, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRebuildIndex(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
name string
|
||||||
|
damage func(t *testing.T, repo *repository.Repository)
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"valid index",
|
||||||
|
func(t *testing.T, repo *repository.Repository) {},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"damaged index",
|
||||||
|
func(t *testing.T, repo *repository.Repository) {
|
||||||
|
index := listIndex(t, repo).List()[0]
|
||||||
|
replaceFile(t, repo, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte {
|
||||||
|
b[0] ^= 0xff
|
||||||
|
return b
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"missing index",
|
||||||
|
func(t *testing.T, repo *repository.Repository) {
|
||||||
|
index := listIndex(t, repo).List()[0]
|
||||||
|
rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()}))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"missing pack",
|
||||||
|
func(t *testing.T, repo *repository.Repository) {
|
||||||
|
pack := listPacks(t, repo).List()[0]
|
||||||
|
rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()}))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
testRebuildIndex(t, false, test.damage)
|
||||||
|
testRebuildIndex(t, true, test.damage)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -60,19 +60,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet,
|
||||||
}
|
}
|
||||||
|
|
||||||
// remove salvaged packs from index
|
// remove salvaged packs from index
|
||||||
printer.P("rebuilding index")
|
err = rebuildIndexFiles(ctx, repo, ids, nil, false, printer)
|
||||||
|
|
||||||
bar = printer.NewCounter("packs processed")
|
|
||||||
err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{
|
|
||||||
SaveProgress: bar,
|
|
||||||
DeleteProgress: func() *progress.Counter {
|
|
||||||
return printer.NewCounter("old indexes deleted")
|
|
||||||
},
|
|
||||||
DeleteReport: func(id restic.ID, _ error) {
|
|
||||||
printer.VV("removed index %v", id.String())
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,7 +17,7 @@ import (
|
||||||
|
|
||||||
func listBlobs(repo restic.Repository) restic.BlobSet {
|
func listBlobs(repo restic.Repository) restic.BlobSet {
|
||||||
blobs := restic.NewBlobSet()
|
blobs := restic.NewBlobSet()
|
||||||
repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
|
_ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
blobs.Insert(pb.BlobHandle)
|
blobs.Insert(pb.BlobHandle)
|
||||||
})
|
})
|
||||||
return blobs
|
return blobs
|
||||||
|
@ -109,7 +109,7 @@ func testRepairBrokenPack(t *testing.T, version uint) {
|
||||||
rand.Seed(seed)
|
rand.Seed(seed)
|
||||||
t.Logf("rand seed is %v", seed)
|
t.Logf("rand seed is %v", seed)
|
||||||
|
|
||||||
createRandomBlobs(t, repo, 5, 0.7)
|
createRandomBlobs(t, repo, 5, 0.7, true)
|
||||||
packsBefore := listPacks(t, repo)
|
packsBefore := listPacks(t, repo)
|
||||||
blobsBefore := listBlobs(repo)
|
blobsBefore := listBlobs(repo)
|
||||||
|
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -142,9 +143,6 @@ func (r *Repository) DisableAutoIndexUpdate() {
|
||||||
// setConfig assigns the given config and updates the repository parameters accordingly
|
// setConfig assigns the given config and updates the repository parameters accordingly
|
||||||
func (r *Repository) setConfig(cfg restic.Config) {
|
func (r *Repository) setConfig(cfg restic.Config) {
|
||||||
r.cfg = cfg
|
r.cfg = cfg
|
||||||
if r.cfg.Version >= 2 {
|
|
||||||
r.idx.MarkCompressed()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config returns the repository configuration.
|
// Config returns the repository configuration.
|
||||||
|
@ -637,9 +635,21 @@ func (r *Repository) Index() restic.MasterIndex {
|
||||||
// SetIndex instructs the repository to use the given index.
|
// SetIndex instructs the repository to use the given index.
|
||||||
func (r *Repository) SetIndex(i restic.MasterIndex) error {
|
func (r *Repository) SetIndex(i restic.MasterIndex) error {
|
||||||
r.idx = i.(*index.MasterIndex)
|
r.idx = i.(*index.MasterIndex)
|
||||||
|
r.configureIndex()
|
||||||
return r.prepareCache()
|
return r.prepareCache()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *Repository) ClearIndex() {
|
||||||
|
r.idx = index.NewMasterIndex()
|
||||||
|
r.configureIndex()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repository) configureIndex() {
|
||||||
|
if r.cfg.Version >= 2 {
|
||||||
|
r.idx.MarkCompressed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// LoadIndex loads all index files from the backend in parallel and stores them
|
// LoadIndex loads all index files from the backend in parallel and stores them
|
||||||
func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
|
func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
|
||||||
debug.Log("Loading index")
|
debug.Log("Loading index")
|
||||||
|
@ -662,6 +672,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
|
||||||
defer p.Done()
|
defer p.Done()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// reset in-memory index before loading it from the repository
|
||||||
|
r.ClearIndex()
|
||||||
|
|
||||||
err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error {
|
err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -691,15 +704,21 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
invalidIndex := false
|
invalidIndex := false
|
||||||
r.idx.Each(ctx, func(blob restic.PackedBlob) {
|
err := r.idx.Each(ctx, func(blob restic.PackedBlob) {
|
||||||
if blob.IsCompressed() {
|
if blob.IsCompressed() {
|
||||||
invalidIndex = true
|
invalidIndex = true
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if invalidIndex {
|
if invalidIndex {
|
||||||
return errors.New("index uses feature not supported by repository version 1")
|
return errors.New("index uses feature not supported by repository version 1")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
// remove index files from the cache which have been removed in the repo
|
// remove index files from the cache which have been removed in the repo
|
||||||
return r.prepareCache()
|
return r.prepareCache()
|
||||||
|
@ -917,6 +936,10 @@ func (r *Repository) Close() error {
|
||||||
// occupies in the repo (compressed or not, including encryption overhead).
|
// occupies in the repo (compressed or not, including encryption overhead).
|
||||||
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
|
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
|
||||||
|
|
||||||
|
if int64(len(buf)) > math.MaxUint32 {
|
||||||
|
return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB")
|
||||||
|
}
|
||||||
|
|
||||||
// compute plaintext hash if not already set
|
// compute plaintext hash if not already set
|
||||||
if id.IsNull() {
|
if id.IsNull() {
|
||||||
// Special case the hash calculation for all zero chunks. This is especially
|
// Special case the hash calculation for all zero chunks. This is especially
|
||||||
|
|
|
@ -242,8 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRepositoryLoadUnpackedBroken(t *testing.T) {
|
func TestRepositoryLoadUnpackedBroken(t *testing.T) {
|
||||||
repo, cleanup := repository.TestFromFixture(t, repoFixture)
|
repo := repository.TestRepository(t)
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := rtest.Random(23, 12345)
|
data := rtest.Random(23, 12345)
|
||||||
id := restic.Hash(data)
|
id := restic.Hash(data)
|
||||||
|
@ -252,7 +251,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) {
|
||||||
data[0] ^= 0xff
|
data[0] ^= 0xff
|
||||||
|
|
||||||
// store broken file
|
// store broken file
|
||||||
err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil))
|
err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher()))
|
||||||
rtest.OK(t, err)
|
rtest.OK(t, err)
|
||||||
|
|
||||||
// without a retry backend this will just return an error that the file is broken
|
// without a retry backend this will just return an error that the file is broken
|
||||||
|
@ -371,13 +370,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) {
|
||||||
idx, err := loadIndex(context.TODO(), repo, id)
|
idx, err := loadIndex(context.TODO(), repo, id)
|
||||||
rtest.OK(t, err)
|
rtest.OK(t, err)
|
||||||
|
|
||||||
idx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
if _, ok := packEntries[pb.PackID]; !ok {
|
if _, ok := packEntries[pb.PackID]; !ok {
|
||||||
packEntries[pb.PackID] = make(map[restic.ID]struct{})
|
packEntries[pb.PackID] = make(map[restic.ID]struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
packEntries[pb.PackID][id] = struct{}{}
|
packEntries[pb.PackID][id] = struct{}{}
|
||||||
})
|
}))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -60,8 +60,11 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o
|
||||||
t.Fatalf("TestRepository(): new repo failed: %v", err)
|
t.Fatalf("TestRepository(): new repo failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg := restic.TestCreateConfig(t, testChunkerPol, version)
|
if version == 0 {
|
||||||
err = repo.init(context.TODO(), test.TestPassword, cfg)
|
version = restic.StableRepoVersion
|
||||||
|
}
|
||||||
|
pol := testChunkerPol
|
||||||
|
err = repo.Init(context.TODO(), version, test.TestPassword, &pol)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("TestRepository(): initialize repo failed: %v", err)
|
t.Fatalf("TestRepository(): initialize repo failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,22 +51,6 @@ func CreateConfig(version uint) (Config, error) {
|
||||||
return cfg, nil
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestCreateConfig creates a config for use within tests.
|
|
||||||
func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) {
|
|
||||||
cfg.ChunkerPolynomial = pol
|
|
||||||
|
|
||||||
cfg.ID = NewRandomID().String()
|
|
||||||
if version == 0 {
|
|
||||||
version = StableRepoVersion
|
|
||||||
}
|
|
||||||
if version < MinRepoVersion || version > MaxRepoVersion {
|
|
||||||
t.Fatalf("version %d is out of range", version)
|
|
||||||
}
|
|
||||||
cfg.Version = version
|
|
||||||
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
var checkPolynomial = true
|
var checkPolynomial = true
|
||||||
var checkPolynomialOnce sync.Once
|
var checkPolynomialOnce sync.Once
|
||||||
|
|
||||||
|
|
|
@ -26,6 +26,7 @@ type Repository interface {
|
||||||
|
|
||||||
Index() MasterIndex
|
Index() MasterIndex
|
||||||
LoadIndex(context.Context, *progress.Counter) error
|
LoadIndex(context.Context, *progress.Counter) error
|
||||||
|
ClearIndex()
|
||||||
SetIndex(MasterIndex) error
|
SetIndex(MasterIndex) error
|
||||||
LookupBlobSize(ID, BlobType) (uint, bool)
|
LookupBlobSize(ID, BlobType) (uint, bool)
|
||||||
|
|
||||||
|
@ -102,8 +103,8 @@ type MasterIndex interface {
|
||||||
Lookup(BlobHandle) []PackedBlob
|
Lookup(BlobHandle) []PackedBlob
|
||||||
|
|
||||||
// Each runs fn on all blobs known to the index. When the context is cancelled,
|
// Each runs fn on all blobs known to the index. When the context is cancelled,
|
||||||
// the index iteration return immediately. This blocks any modification of the index.
|
// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index.
|
||||||
Each(ctx context.Context, fn func(PackedBlob))
|
Each(ctx context.Context, fn func(PackedBlob)) error
|
||||||
ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs
|
ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs
|
||||||
|
|
||||||
Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error
|
Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error
|
||||||
|
|
|
@ -45,7 +45,7 @@ func TestCreateSnapshot(t *testing.T) {
|
||||||
t.Fatalf("snapshot has zero tree ID")
|
t.Fatalf("snapshot has zero tree ID")
|
||||||
}
|
}
|
||||||
|
|
||||||
checker.TestCheckRepo(t, repo)
|
checker.TestCheckRepo(t, repo, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkTestCreateSnapshot(t *testing.B) {
|
func BenchmarkTestCreateSnapshot(t *testing.B) {
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
package progress
|
package progress
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
// A Printer can can return a new counter or print messages
|
// A Printer can can return a new counter or print messages
|
||||||
// at different log levels.
|
// at different log levels.
|
||||||
// It must be safe to call its methods from concurrent goroutines.
|
// It must be safe to call its methods from concurrent goroutines.
|
||||||
|
@ -28,3 +30,36 @@ func (*NoopPrinter) P(_ string, _ ...interface{}) {}
|
||||||
func (*NoopPrinter) V(_ string, _ ...interface{}) {}
|
func (*NoopPrinter) V(_ string, _ ...interface{}) {}
|
||||||
|
|
||||||
func (*NoopPrinter) VV(_ string, _ ...interface{}) {}
|
func (*NoopPrinter) VV(_ string, _ ...interface{}) {}
|
||||||
|
|
||||||
|
// TestPrinter prints messages during testing
|
||||||
|
type TestPrinter struct {
|
||||||
|
t testing.TB
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTestPrinter(t testing.TB) *TestPrinter {
|
||||||
|
return &TestPrinter{
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Printer = (*TestPrinter)(nil)
|
||||||
|
|
||||||
|
func (p *TestPrinter) NewCounter(_ string) *Counter {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestPrinter) E(msg string, args ...interface{}) {
|
||||||
|
p.t.Logf("error: "+msg, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestPrinter) P(msg string, args ...interface{}) {
|
||||||
|
p.t.Logf("print: "+msg, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestPrinter) V(msg string, args ...interface{}) {
|
||||||
|
p.t.Logf("verbose: "+msg, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestPrinter) VV(msg string, args ...interface{}) {
|
||||||
|
p.t.Logf("verbose2: "+msg, args...)
|
||||||
|
}
|
||||||
|
|
|
@ -13,7 +13,7 @@ import (
|
||||||
// TestTree is used to construct a list of trees for testing the walker.
|
// TestTree is used to construct a list of trees for testing the walker.
|
||||||
type TestTree map[string]interface{}
|
type TestTree map[string]interface{}
|
||||||
|
|
||||||
// TestNode is used to test the walker.
|
// TestFile is used to test the walker.
|
||||||
type TestFile struct {
|
type TestFile struct {
|
||||||
Size uint64
|
Size uint64
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue