Compare commits

...

28 Commits

Author SHA1 Message Date
DRON-666 18cb0fabd6
Merge 2ed9e36cb3 into faffd15d13 2024-04-24 23:58:42 +00:00
Michael Eischer faffd15d13
Merge pull request #4734 from maouw/enhancement/envvar-for-host
Add support for specifying --host via environment variable
2024-04-24 20:00:15 +00:00
Michael Eischer 347e9d0765 complete RESTIC_HOST environment handling & test 2024-04-24 21:52:39 +02:00
Altan Orhon 871ea1eaf3 Add support for specifying --host via environment variable
This commit adds support for specifying the `--host` option via the `RESTIC_HOST` environment variable. This is done by extending option processing in `cmd_backup.go` and for `restic.SnapshotFilter` in `find.go`.
2024-04-24 21:49:42 +02:00
Michael Eischer a7b5e09902
Merge pull request #4753 from MichaelEischer/remove-cleanup-handlers
Replace cleanup handlers with context based command cancelation
2024-04-24 21:34:19 +02:00
Michael Eischer 3f9d50865d
Merge pull request #4776 from MichaelEischer/cleanup-backend-open
unify backend open and create
2024-04-24 21:24:27 +02:00
Michael Eischer 5f263752d7 init: also apply limiter for non-HTTP backend 2024-04-24 20:42:30 +02:00
Michael Eischer 484dbb1cf4 get rid of a few global variables 2024-04-22 22:39:33 +02:00
Michael Eischer 940a3159b5 let index.Each() and pack.Size() return error on canceled context
This forces a caller to actually check that the function did complete.
2024-04-22 22:39:32 +02:00
Michael Eischer 31624aeffd Improve command shutdown on context cancellation 2024-04-22 22:31:38 +02:00
Michael Eischer 910927670f mount: fix exit code on cancellation 2024-04-22 22:27:19 +02:00
Michael Eischer 6f2a4dea21 remove global shutdown hook 2024-04-22 22:27:19 +02:00
Michael Eischer 699ef5e9de debug: replace cleanup handler usage in profiling setup 2024-04-22 22:27:19 +02:00
Michael Eischer eb710a28e8 use standalone shutdown hook for readPasswordTerminal
move terminal restoration into readPasswordTerminal
2024-04-22 22:27:19 +02:00
Michael Eischer 86c7909f41 mount: use standalone shutdown hook via goroutine 2024-04-22 22:27:19 +02:00
Michael Eischer 93135dc705 lock: drop cleanup handler 2024-04-22 22:27:19 +02:00
Michael Eischer 21a7cb405c check: replace cleanup handler 2024-04-22 22:27:19 +02:00
Michael Eischer 6c6dceade3 global: unify backend open and create 2024-04-19 22:26:14 +02:00
DRON-666 2ed9e36cb3 vss: Add tests for "provider" option 2024-01-27 22:16:05 +03:00
DRON-666 22a116a30c vss: Update docs and changelog 2024-01-27 22:16:05 +03:00
DRON-666 3c30543e91 vss: Add "provider" option 2024-01-27 22:16:05 +03:00
DRON-666 7782739ace vss: Fix issues reported by linters 2024-01-27 22:16:05 +03:00
DRON-666 70d0323a0e vss: Change `ErrorHandler` signature
We don't need `error` here: the only existing implementation
of `ErrorHandler` always calls `Backup.Error`, and all
implementations of `Backup.Error` always return nil.
2024-01-27 22:14:41 +03:00
DRON-666 c2559e6efe vss: Add some tests 2024-01-27 22:14:41 +03:00
DRON-666 a677511536 vss: Update docs and changelog 2024-01-27 22:14:41 +03:00
DRON-666 2d51d72ad2 vss: Add volume filtering
Add options to exclude all mountpoints and arbitrary volumes from snapshotting.
2024-01-27 22:14:41 +03:00
DRON-666 fcddf1f847 vss: Add "timeout" option
Change multiple "callAsyncFunctionAndWait" calls with a fixed timeout
to a calculated timeout based on the deadline.
2024-01-27 22:14:41 +03:00
DRON-666 c4e21ac183 vss: Add initial support for extended options 2024-01-27 22:14:41 +03:00
54 changed files with 1209 additions and 432 deletions
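These commits replace restic's global cleanup handlers with a single context that is canceled when SIGINT or SIGTERM arrives; commands then check `ctx.Err()` at convenient points. A minimal, self-contained Go sketch of that pattern (illustrative only, not code from this diff):

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// One global context, canceled on SIGINT or SIGTERM.
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-ch
		cancel()
	}()

	// A long-running command polls ctx.Err() instead of registering a
	// cleanup handler; the caller maps context.Canceled to exit code 130.
	for i := 0; i < 10; i++ {
		if ctx.Err() != nil {
			fmt.Println("canceled:", ctx.Err())
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("done")
}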

View File

@ -0,0 +1,10 @@
Enhancement: Allow specifying `--host` via environment variable
Restic commands that operate on snapshots, such as `restic backup` and
`restic snapshots`, support the `--host` flag to specify the hostname for
grouping snapshots. They now permit selecting the hostname via the
environment variable `RESTIC_HOST`. `--host` still takes precedence over the
environment variable.
https://github.com/restic/restic/issues/4733
https://github.com/restic/restic/pull/4734
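For illustration, a minimal Go sketch of the precedence rule described above (the environment variable only fills the default, an explicit `--host` wins); the flag wiring is simplified and hypothetical compared to the real `cmd_backup.go`/`filter.go` changes:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	host := flags.StringP("host", "H", "", "hostname for the snapshot")
	_ = flags.Parse(os.Args[1:])

	// RESTIC_HOST is only consulted when --host was not given.
	if *host == "" {
		if env := os.Getenv("RESTIC_HOST"); env != "" {
			*host = env
		}
	}
	fmt.Println("host:", *host)
}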

View File

@ -0,0 +1,22 @@
Enhancement: Add options to configure Windows Shadow Copy Service
Restic always used a 120-second timeout and unconditionally created VSS snapshots
for all volume mount points on disk. This behavior can now be fine-tuned with
new options, for example to exclude specific volumes and mount points or to
completely disable automatic snapshotting of volume mount points.
For example:
restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true
changes the timeout to five minutes and disables snapshotting of mount points on all volumes, and
restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}"
excludes drive `D:`, mount point `C:\MNT`, and a specific volume from VSS snapshotting.
restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5}
uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider.
https://github.com/restic/restic/pull/3067
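A rough sketch of how such `-o vss.*` options end up in the new `VSSConfig` (usage is illustrative; `fs.ParseVSSConfig` and the option keys appear in the diff below, the concrete values are only examples):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/options"
)

func main() {
	// Values as they would arrive from repeated -o flags on the command line.
	opts := options.Options{
		"vss.timeout":  "5m",
		"vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}",
	}
	cfg, err := fs.ParseVSSConfig(opts)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("timeout=%v provider=%q\n", cfg.Timeout, cfg.Provider)
}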

View File

@ -1,89 +1,41 @@
package main
import (
"context"
"os"
"os/signal"
"sync"
"syscall"
"github.com/restic/restic/internal/debug"
)
var cleanupHandlers struct {
sync.Mutex
list []func(code int) (int, error)
done bool
ch chan os.Signal
func createGlobalContext() context.Context {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan os.Signal, 1)
go cleanupHandler(ch, cancel)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
return ctx
}
func init() {
cleanupHandlers.ch = make(chan os.Signal, 1)
go CleanupHandler(cleanupHandlers.ch)
signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM)
}
// cleanupHandler handles the SIGINT and SIGTERM signals.
func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) {
s := <-c
debug.Log("signal %v received, cleaning up", s)
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
// AddCleanupHandler adds the function f to the list of cleanup handlers so
// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
// is received.
func AddCleanupHandler(f func(code int) (int, error)) {
cleanupHandlers.Lock()
defer cleanupHandlers.Unlock()
// reset the done flag for integration tests
cleanupHandlers.done = false
cleanupHandlers.list = append(cleanupHandlers.list, f)
}
// RunCleanupHandlers runs all registered cleanup handlers
func RunCleanupHandlers(code int) int {
cleanupHandlers.Lock()
defer cleanupHandlers.Unlock()
if cleanupHandlers.done {
return code
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
}
cleanupHandlers.done = true
for _, f := range cleanupHandlers.list {
var err error
code, err = f(code)
if err != nil {
Warnf("error in cleanup handler: %v\n", err)
}
}
cleanupHandlers.list = nil
return code
cancel()
}
// CleanupHandler handles the SIGINT and SIGTERM signals.
func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("signal %v received, cleaning up", s)
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
}
code := 0
if s == syscall.SIGINT || s == syscall.SIGTERM {
code = 130
} else {
code = 1
}
Exit(code)
}
}
// Exit runs the cleanup handlers and then terminates the process with the
// given exit code.
// Exit terminates the process with the given exit code.
func Exit(code int) {
code = RunCleanupHandlers(code)
debug.Log("exiting with status code %d", code)
os.Exit(code)
}

View File

@ -114,7 +114,7 @@ func init() {
f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout")
f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the \"parent\" flag")
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
err := f.MarkDeprecated("hostname", "use --host")
if err != nil {
@ -137,6 +137,11 @@ func init() {
// parse read concurrency from env, on error the default value will be used
readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32)
backupOptions.ReadConcurrency = uint(readConcurrency)
// parse host from env, if not exists or empty the default value will be used
if host := os.Getenv("RESTIC_HOST"); host != "" {
backupOptions.Host = host
}
}
// filterExisting returns a slice of all existing items, or an error if no
@ -440,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o
}
func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
err := opts.Check(gopts, args)
var vsscfg fs.VSSConfig
var err error
if runtime.GOOS == "windows" {
if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil {
return err
}
}
err = opts.Check(gopts, args)
if err != nil {
return err
}
@ -542,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
return err
}
errorHandler := func(item string, err error) error {
return progressReporter.Error(item, err)
errorHandler := func(item string, err error) {
_ = progressReporter.Error(item, err)
}
messageHandler := func(msg string, args ...interface{}) {
@ -552,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
}
}
localVss := fs.NewLocalVss(errorHandler, messageHandler)
localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg)
defer localVss.DeleteSnapshots()
targetFS = localVss
}
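This hunk reflects the "vss: Change `ErrorHandler` signature" commit above: the handler's error return was always nil, so the new callback drops it and simply ignores the result of `progressReporter.Error`. A small hypothetical adapter showing the migration (not part of the diff):

package example

// errorHandlerOld and errorHandlerNew mirror the old and new callback shapes.
type errorHandlerOld func(item string, err error) error

type errorHandlerNew func(item string, err error)

// adapt wraps an old-style handler for use where the new shape is expected.
func adapt(old errorHandlerOld) errorHandlerNew {
	return func(item string, err error) {
		_ = old(item, err) // the returned error was always nil anyway
	}
}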

View File

@ -199,10 +199,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
}
cleanup := prepareCheckCache(opts, &gopts)
AddCleanupHandler(func(code int) (int, error) {
cleanup()
return code, nil
})
defer cleanup()
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
@ -222,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
Verbosef("load indexes\n")
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
hints, errs := chkr.LoadIndex(ctx, bar)
if ctx.Err() != nil {
return ctx.Err()
}
errorsFound := false
suggestIndexRebuild := false
@ -283,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if orphanedPacks > 0 {
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("check snapshots, trees and blobs\n")
errChan = make(chan error)
@ -316,9 +319,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
// Must happen after `errChan` is read from in the above loop to avoid
// deadlocking in the case of errors.
wg.Wait()
if ctx.Err() != nil {
return ctx.Err()
}
if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs(ctx) {
unused, err := chkr.UnusedBlobs(ctx)
if err != nil {
return err
}
for _, id := range unused {
Verbosef("unused blob %v\n", id)
errorsFound = true
}
@ -395,10 +405,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
doReadData(packs)
}
if ctx.Err() != nil {
return ctx.Err()
}
if errorsFound {
return errors.Fatal("repository contains errors")
}
Verbosef("no errors were found\n")
return nil

View File

@ -53,7 +53,7 @@ func init() {
}
func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination")
secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination")
if err != nil {
return err
}
@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
// also consider identical snapshot copies
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
// remember already processed trees across all snapshots
visitedTrees := restic.NewIDSet()
@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
}
Verbosef("snapshot %s saved\n", newID.Str())
}
return nil
return ctx.Err()
}
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {

View File

@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
if err != errAllPacksFound {
// try to resolve unknown pack ids from the index
packIDs = f.indexPacksToBlobs(ctx, packIDs)
packIDs, err = f.indexPacksToBlobs(ctx, packIDs)
if err != nil {
return err
}
}
if len(packIDs) > 0 {
@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
return nil
}
func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} {
func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) {
wctx, cancel := context.WithCancel(ctx)
defer cancel()
// remember which packs were found in the index
indexPackIDs := make(map[string]struct{})
f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
idStr := pb.PackID.String()
// keep entry in packIDs as Each() returns individual index entries
matchingID := false
@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
indexPackIDs[idStr] = struct{}{}
}
})
if err != nil {
return nil, err
}
for id := range indexPackIDs {
delete(packIDs, id)
@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
}
Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list)
}
return packIDs
return packIDs, nil
}
func (f *Finder) findObjectPack(id string, t restic.BlobType) {
@ -608,6 +614,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
filteredSnapshots = append(filteredSnapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
sort.Slice(filteredSnapshots, func(i, j int) bool {
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)

View File

@ -188,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
var jsonGroups []*ForgetGroup
@ -270,6 +273,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
}
}
if ctx.Err() != nil {
return ctx.Err()
}
if len(removeSnIDs) > 0 {
if !opts.DryRun {
bar := printer.NewCounter("files deleted")

View File

@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
return err
}
gopts.password, err = ReadPasswordTwice(gopts,
gopts.password, err = ReadPasswordTwice(ctx, gopts,
"enter password for new repository: ",
"enter password again: ")
if err != nil {
@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) {
if opts.CopyChunkerParameters {
otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary")
otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary")
if err != nil {
return nil, err
}

View File

@ -60,7 +60,7 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg
}
func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error {
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
if err != nil {
return err
}
@ -83,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string
func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) {
func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}
@ -97,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error)
newopts := gopts
newopts.password = ""
return ReadPasswordTwice(newopts,
return ReadPasswordTwice(ctx, newopts,
"enter new password: ",
"enter password again: ")
}

View File

@ -57,7 +57,7 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption
}
func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error {
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
if err != nil {
return err
}

View File

@ -59,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
if err != nil {
return err
}
idx.Each(ctx, func(blobs restic.PackedBlob) {
return idx.Each(ctx, func(blobs restic.PackedBlob) {
Printf("%v %v\n", blobs.Type, blobs.ID)
})
return nil
})
default:
return errors.Fatal("invalid type")

View File

@ -152,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
}
}
AddCleanupHandler(func(code int) (int, error) {
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
err := umount(mountpoint)
if err != nil {
Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
}
// replace error code of sigint
if code == 130 {
code = 0
}
return code, nil
})
systemFuse.Debug = func(msg interface{}) {
debug.Log("fuse: %v", msg)
}
c, err := systemFuse.Mount(mountpoint, mountOptions...)
if err != nil {
return err
}
systemFuse.Debug = func(msg interface{}) {
debug.Log("fuse: %v", msg)
}
cfg := fuse.Config{
OwnerIsRoot: opts.OwnerRoot,
Filter: opts.SnapshotFilter,
@ -187,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n")
debug.Log("serving mount at %v", mountpoint)
err = fs.Serve(c, root)
if err != nil {
return err
done := make(chan struct{})
go func() {
defer close(done)
err = fs.Serve(c, root)
}()
select {
case <-ctx.Done():
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
err := systemFuse.Unmount(mountpoint)
if err != nil {
Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
}
return ErrOK
case <-done:
// clean shutdown, nothing to do
}
<-c.Ready
return c.MountError
}
func umount(mountpoint string) error {
return systemFuse.Unmount(mountpoint)
return err
}

View File

@ -12,6 +12,7 @@ import (
"testing"
"time"
systemFuse "github.com/anacrolix/fuse"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
@ -65,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
func testRunUmount(t testing.TB, dir string) {
var err error
for i := 0; i < mountWait; i++ {
if err = umount(dir); err == nil {
if err = systemFuse.Unmount(dir); err == nil {
t.Logf("directory %v umounted", dir)
return
}

View File

@ -197,6 +197,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
if popts.DryRun {
printer.P("\nWould have made the following changes:")

View File

@ -61,16 +61,22 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
// tree. If it is not referenced, we have a root tree.
trees := make(map[restic.ID]bool)
repo.Index().Each(ctx, func(blob restic.PackedBlob) {
err = repo.Index().Each(ctx, func(blob restic.PackedBlob) {
if blob.Type == restic.TreeBlob {
trees[blob.Blob.ID] = false
}
})
if err != nil {
return err
}
Verbosef("load %d trees\n", len(trees))
bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
for id := range trees {
tree, err := restic.LoadTree(ctx, repo, id)
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil {
Warnf("unable to load tree %v: %v\n", id.Str(), err)
continue

View File

@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
changedCount++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n")
if changedCount == 0 {

View File

@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
changedCount++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n")
if changedCount == 0 {

View File

@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
if err != nil {
return err

View File

@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return fmt.Errorf("error walking snapshot: %v", err)
}
}
if err != nil {
return err
if ctx.Err() != nil {
return ctx.Err()
}
if opts.countMode == countModeRawData {
@ -352,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error {
Warnf("File Type: %v\n%v\n", t, hist)
}
hist := statsDebugBlobs(ctx, repo)
hist, err := statsDebugBlobs(ctx, repo)
if err != nil {
return err
}
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
}
@ -370,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File
return hist, err
}
func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) {
var hist [restic.NumBlobTypes]*sizeHistogram
for i := 0; i < len(hist); i++ {
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
}
repo.Index().Each(ctx, func(pb restic.PackedBlob) {
err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
hist[pb.Type].Add(uint64(pb.Length))
})
return hist
return hist, err
}
type sizeClass struct {

View File

@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
changeCnt++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
if changeCnt == 0 {
Verbosef("no snapshots were modified\n")
} else {

View File

@ -2,6 +2,7 @@ package main
import (
"context"
"os"
"github.com/restic/restic/internal/restic"
"github.com/spf13/pflag"
@ -14,17 +15,27 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter,
if !addHostShorthand {
hostShorthand = ""
}
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)")
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)")
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")
// set default based on env if set
if host := os.Getenv("RESTIC_HOST"); host != "" {
filt.Hosts = []string{host}
}
}
// initSingleSnapshotFilter is used for commands that work on a single snapshot
// MUST be combined with restic.FindFilteredSnapshot
func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) {
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)")
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)")
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")
// set default based on env if set
if host := os.Getenv("RESTIC_HOST"); host != "" {
filt.Hosts = []string{host}
}
}
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.

61
cmd/restic/find_test.go Normal file
View File

@ -0,0 +1,61 @@
package main
import (
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/spf13/pflag"
)
func TestSnapshotFilter(t *testing.T) {
for _, test := range []struct {
name string
args []string
expected []string
env string
}{
{
"no value",
[]string{},
nil,
"",
},
{
"args only",
[]string{"--host", "abc"},
[]string{"abc"},
"",
},
{
"env default",
[]string{},
[]string{"def"},
"def",
},
{
"both",
[]string{"--host", "abc"},
[]string{"abc"},
"def",
},
} {
t.Run(test.name, func(t *testing.T) {
t.Setenv("RESTIC_HOST", test.env)
for _, mode := range []bool{false, true} {
set := pflag.NewFlagSet("test", pflag.PanicOnError)
flt := &restic.SnapshotFilter{}
if mode {
initMultiSnapshotFilter(set, flt, false)
} else {
initSingleSnapshotFilter(set, flt)
}
err := set.Parse(test.args)
rtest.OK(t, err)
rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts")
}
})
}
}

View File

@ -43,7 +43,7 @@ import (
"golang.org/x/term"
)
var version = "0.16.4-dev (compiled manually)"
const version = "0.16.4-dev (compiled manually)"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
@ -96,9 +96,6 @@ var globalOptions = GlobalOptions{
stderr: os.Stderr,
}
var isReadingPassword bool
var internalGlobalCtx context.Context
func init() {
backends := location.NewRegistry()
backends.Register(azure.NewFactory())
@ -112,15 +109,6 @@ func init() {
backends.Register(swift.NewFactory())
globalOptions.backends = backends
var cancel context.CancelFunc
internalGlobalCtx, cancel = context.WithCancel(context.Background())
AddCleanupHandler(func(code int) (int, error) {
// Must be called before the unlock cleanup handler to ensure that the latter is
// not blocked due to limited number of backend connections, see #1434
cancel()
return code, nil
})
f := cmdRoot.PersistentFlags()
f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)")
@ -165,8 +153,6 @@ func init() {
// parse target pack size from env, on error the default value will be used
targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32)
globalOptions.PackSize = uint(targetPackSize)
restoreTerminal()
}
func stdinIsTerminal() bool {
@ -191,40 +177,6 @@ func stdoutTerminalWidth() int {
return w
}
// restoreTerminal installs a cleanup handler that restores the previous
// terminal state on exit. This handler is only intended to restore the
// terminal configuration if restic exits after receiving a signal. A regular
// program execution must revert changes to the terminal configuration itself.
// The terminal configuration is only restored while reading a password.
func restoreTerminal() {
if !term.IsTerminal(int(os.Stdout.Fd())) {
return
}
fd := int(os.Stdout.Fd())
state, err := term.GetState(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
return
}
AddCleanupHandler(func(code int) (int, error) {
// Restoring the terminal configuration while restic runs in the
// background, causes restic to get stopped on unix systems with
// a SIGTTOU signal. Thus only restore the terminal settings if
// they might have been modified, which is the case while reading
// a password.
if !isReadingPassword {
return code, nil
}
err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
return code, err
})
}
// ClearLine creates a platform dependent string to clear the current
// line, so it can be overwritten.
//
@ -333,24 +285,48 @@ func readPassword(in io.Reader) (password string, err error) {
// readPasswordTerminal reads the password from the given reader which must be a
// tty. Prompt is printed on the writer out before attempting to read the
// password.
func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) {
fmt.Fprint(out, prompt)
isReadingPassword = true
buf, err := term.ReadPassword(int(in.Fd()))
isReadingPassword = false
fmt.Fprintln(out)
// password. If the context is canceled, the function leaks the password reading
// goroutine.
func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) {
fd := int(out.Fd())
state, err := term.GetState(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
return "", err
}
done := make(chan struct{})
var buf []byte
go func() {
defer close(done)
fmt.Fprint(out, prompt)
buf, err = term.ReadPassword(int(in.Fd()))
fmt.Fprintln(out)
}()
select {
case <-ctx.Done():
err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
return "", ctx.Err()
case <-done:
// clean shutdown, nothing to do
}
if err != nil {
return "", errors.Wrap(err, "ReadPassword")
}
password = string(buf)
return password, nil
return string(buf), nil
}
// ReadPassword reads the password from a password file, the environment
// variable RESTIC_PASSWORD or prompts the user.
func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
// variable RESTIC_PASSWORD or prompts the user. If the context is canceled,
// the function leaks the password reading goroutine.
func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) {
if opts.password != "" {
return opts.password, nil
}
@ -361,7 +337,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
)
if stdinIsTerminal() {
password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt)
} else {
password, err = readPassword(os.Stdin)
Verbosef("reading repository password from stdin\n")
@ -379,14 +355,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
}
// ReadPasswordTwice calls ReadPassword two times and returns an error when the
// passwords don't match.
func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
pw1, err := ReadPassword(gopts, prompt1)
// passwords don't match. If the context is canceled, the function leaks the
// password reading goroutine.
func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
pw1, err := ReadPassword(ctx, gopts, prompt1)
if err != nil {
return "", err
}
if stdinIsTerminal() {
pw2, err := ReadPassword(gopts, prompt2)
pw2, err := ReadPassword(ctx, gopts, prompt2)
if err != nil {
return "", err
}
@ -469,7 +446,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}
for ; passwordTriesLeft > 0; passwordTriesLeft-- {
opts.password, err = ReadPassword(opts, "enter password for repository: ")
opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ")
if ctx.Err() != nil {
return nil, ctx.Err()
}
if err != nil && passwordTriesLeft > 1 {
opts.password = ""
fmt.Printf("%s. Try again\n", err)
@ -570,16 +550,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
return cfg, nil
}
// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}
var be backend.Backend
cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err
@ -599,7 +576,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
be, err = factory.Open(ctx, cfg, rt, lim)
var be backend.Backend
if create {
be, err = factory.Create(ctx, cfg, rt, lim)
} else {
be, err = factory.Open(ctx, cfg, rt, lim)
}
if err != nil {
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
}
@ -615,6 +598,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
}
return be, nil
}
// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
be, err := innerOpen(ctx, s, gopts, opts, false)
if err != nil {
return nil, err
}
// check if config is there
fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
if err != nil {
@ -630,31 +624,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
// Create the backend specified by URI.
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, err
}
cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err
}
rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
return nil, errors.Fatal(err.Error())
}
factory := gopts.backends.Lookup(loc.Scheme)
if factory == nil {
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
be, err := factory.Create(ctx, cfg, rt, nil)
if err != nil {
return nil, err
}
return logger.New(sema.NewBackend(be)), nil
return innerOpen(ctx, s, gopts, opts, true)
}

View File

@ -15,23 +15,28 @@ import (
"github.com/pkg/profile"
)
var (
listenProfile string
memProfilePath string
cpuProfilePath string
traceProfilePath string
blockProfilePath string
insecure bool
)
type ProfileOptions struct {
listen string
memPath string
cpuPath string
tracePath string
blockPath string
insecure bool
}
var profileOpts ProfileOptions
var prof interface {
Stop()
}
func init() {
f := cmdRoot.PersistentFlags()
f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`")
f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`")
f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`")
f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`")
f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings")
}
type fakeTestingTB struct{}
@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) {
}
func runDebug() error {
if listenProfile != "" {
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile)
if profileOpts.listen != "" {
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen)
go func() {
err := http.ListenAndServe(listenProfile, nil)
err := http.ListenAndServe(profileOpts.listen, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err)
}
@ -52,16 +57,16 @@ func runDebug() error {
}
profilesEnabled := 0
if memProfilePath != "" {
if profileOpts.memPath != "" {
profilesEnabled++
}
if cpuProfilePath != "" {
if profileOpts.cpuPath != "" {
profilesEnabled++
}
if traceProfilePath != "" {
if profileOpts.tracePath != "" {
profilesEnabled++
}
if blockProfilePath != "" {
if profileOpts.blockPath != "" {
profilesEnabled++
}
@ -69,30 +74,25 @@ func runDebug() error {
return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time")
}
var prof interface {
Stop()
if profileOpts.memPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath))
} else if profileOpts.cpuPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath))
} else if profileOpts.tracePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath))
} else if profileOpts.blockPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath))
}
if memProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath))
} else if cpuProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
} else if traceProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath))
} else if blockProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath))
}
if prof != nil {
AddCleanupHandler(func(code int) (int, error) {
prof.Stop()
return code, nil
})
}
if insecure {
if profileOpts.insecure {
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
}
return nil
}
func stopDebug() {
if prof != nil {
prof.Stop()
}
}

View File

@ -5,3 +5,6 @@ package main
// runDebug is a noop without the debug tag.
func runDebug() error { return nil }
// stopDebug is a noop without the debug tag.
func stopDebug() {}

View File

@ -252,11 +252,11 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
rtest.OK(t, r.LoadIndex(ctx, nil))
treePacks := restic.NewIDSet()
r.Index().Each(ctx, func(pb restic.PackedBlob) {
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
}))
return treePacks
}
@ -280,11 +280,11 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem
rtest.OK(t, r.LoadIndex(ctx, nil))
treePacks := restic.NewIDSet()
r.Index().Each(ctx, func(pb restic.PackedBlob) {
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
}))
// remove all packs containing data blobs
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {

View File

@ -21,18 +21,11 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo
Verbosef("%s", msg)
}
}, Warnf)
unlock = lock.Unlock
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
AddCleanupHandler(func(code int) (int, error) {
lock.Unlock()
return code, nil
})
if err != nil {
return nil, nil, nil, err
}
unlock = lock.Unlock
} else {
repo.SetDryRun()
}

View File

@ -3,6 +3,7 @@ package main
import (
"bufio"
"bytes"
"context"
"fmt"
"log"
"os"
@ -24,6 +25,8 @@ func init() {
_, _ = maxprocs.Set()
}
var ErrOK = errors.New("ok")
// cmdRoot is the base command when no other command has been specified.
var cmdRoot = &cobra.Command{
Use: "restic",
@ -74,6 +77,9 @@ The full documentation can be found at https://restic.readthedocs.io/ .
// enabled)
return runDebug()
},
PersistentPostRun: func(_ *cobra.Command, _ []string) {
stopDebug()
},
}
// Distinguish commands that need the password from those that work without,
@ -88,8 +94,6 @@ func needsPassword(cmd string) bool {
}
}
var logBuffer = bytes.NewBuffer(nil)
func tweakGoGC() {
// lower GOGC from 100 to 50, unless it was manually overwritten by the user
oldValue := godebug.SetGCPercent(50)
@ -102,6 +106,7 @@ func main() {
tweakGoGC()
// install custom global logger into a buffer, if an error occurs
// we can show the logs
logBuffer := bytes.NewBuffer(nil)
log.SetOutput(logBuffer)
err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) {
@ -115,7 +120,16 @@ func main() {
debug.Log("main %#v", os.Args)
debug.Log("restic %s compiled with %v on %v/%v",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
err = cmdRoot.ExecuteContext(internalGlobalCtx)
ctx := createGlobalContext()
err = cmdRoot.ExecuteContext(ctx)
if err == nil {
err = ctx.Err()
} else if err == ErrOK {
// ErrOK overwrites context cancelation errors
err = nil
}
switch {
case restic.IsAlreadyLocked(err):
@ -137,11 +151,13 @@ func main() {
}
var exitCode int
switch err {
case nil:
switch {
case err == nil:
exitCode = 0
case ErrInvalidSourceData:
case err == ErrInvalidSourceData:
exitCode = 3
case errors.Is(err, context.Canceled):
exitCode = 130
default:
exitCode = 1
}

View File

@ -1,6 +1,7 @@
package main
import (
"context"
"os"
"github.com/restic/restic/internal/errors"
@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo
opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
}
func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
}
@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
return GlobalOptions{}, false, err
}
}
dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ")
dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ")
if err != nil {
return GlobalOptions{}, false, err
}

View File

@ -1,6 +1,7 @@
package main
import (
"context"
"os"
"path/filepath"
"testing"
@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
// Test all valid cases
for _, testCase := range validSecondaryRepoTestCases {
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
rtest.OK(t, err)
rtest.Equals(t, DstGOpts, testCase.DstGOpts)
rtest.Equals(t, isFromRepo, testCase.FromRepo)
@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
// Test all invalid cases
for _, testCase := range invalidSecondaryRepoTestCases {
_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
_, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
}
}

View File

@ -56,6 +56,39 @@ snapshot for each volume that contains files to backup. Files are read from the
VSS snapshot instead of the regular filesystem. This allows to backup files that are
exclusively locked by another process during the backup.
You can use additional options to change VSS behaviour:
* ``-o vss.timeout`` specifies the timeout for VSS snapshot creation; the default value is 120 seconds
* ``-o vss.excludeallmountpoints`` disables automatic snapshotting of all volume mount points
* ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting
* ``-o vss.provider`` specifies the VSS provider used for snapshotting
For example, a timeout of 2.5 minutes with mount point snapshotting disabled can be specified as
.. code-block:: console
-o vss.timeout=2m30s -o vss.excludeallmountpoints=true
and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as
.. code-block:: console
-o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}"
The VSS provider can be specified by GUID
.. code-block:: console
-o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5}
or by name
.. code-block:: console
-o vss.provider="Hyper-V IC Software Shadow Copy Provider"
``MS`` can also be used as an alias for ``Microsoft Software Shadow Copy provider 1.0``.
By default VSS ignores Outlook OST files. This is not a restriction of restic
but the default Windows VSS configuration. The files not to snapshot are
configured in the Windows registry under the following key:

View File

@ -303,7 +303,7 @@ func generateFiles() {
}
}
var versionPattern = `var version = ".*"`
var versionPattern = `const version = ".*"`
const versionCodeFile = "cmd/restic/global.go"
@ -313,7 +313,7 @@ func updateVersion() {
die("unable to write version to file: %v", err)
}
newVersion := fmt.Sprintf("var version = %q", opts.Version)
newVersion := fmt.Sprintf("const version = %q", opts.Version)
replace(versionCodeFile, versionPattern, newVersion)
if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 {
@ -323,7 +323,7 @@ func updateVersion() {
}
func updateVersionDev() {
newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version)
newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version)
replace(versionCodeFile, versionPattern, newVersion)
msg("committing cmd/restic/global.go with dev version")

View File

@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
return res
}
case <-ctx.Done():
return futureNodeResult{err: ctx.Err()}
}
return futureNodeResult{err: errors.Errorf("no result")}
}

View File

@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
// return the error if it wasn't ignored
if fnr.err != nil {
debug.Log("err for %v: %v", fnr.snPath, fnr.err)
if fnr.err == context.Canceled {
return nil, stats, fnr.err
}
fnr.err = s.errFn(fnr.target, fnr.err)
if fnr.err == nil {
// ignore error

View File

@ -106,9 +106,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error {
return err
}
func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType {
func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) {
packs := make(map[restic.ID]restic.BlobType)
idx.Each(ctx, func(pb restic.PackedBlob) {
err := idx.Each(ctx, func(pb restic.PackedBlob) {
tpe, exists := packs[pb.PackID]
if exists {
if pb.Type != tpe {
@ -119,7 +119,7 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID
}
packs[pb.PackID] = tpe
})
return packs
return packs, err
}
// LoadIndex loads all index files.
@ -169,7 +169,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
debug.Log("process blobs")
cnt := 0
index.Each(ctx, func(blob restic.PackedBlob) {
err = index.Each(ctx, func(blob restic.PackedBlob) {
cnt++
if _, ok := packToIndex[blob.PackID]; !ok {
@ -179,7 +179,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
})
debug.Log("%d blobs processed", cnt)
return nil
return err
})
if err != nil {
errs = append(errs, err)
@ -193,8 +193,14 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
}
// compute pack size using index entries
c.packs = pack.Size(ctx, c.masterIndex, false)
packTypes := computePackTypes(ctx, c.masterIndex)
c.packs, err = pack.Size(ctx, c.masterIndex, false)
if err != nil {
return hints, append(errs, err)
}
packTypes, err := computePackTypes(ctx, c.masterIndex)
if err != nil {
return hints, append(errs, err)
}
debug.Log("checking for duplicate packs")
for packID := range c.packs {
@ -484,7 +490,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
}
// UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) {
if !c.trackUnused {
panic("only works when tracking blob references")
}
@ -495,7 +501,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c.repo.Index().Each(ctx, func(blob restic.PackedBlob) {
err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if !c.blobRefs.M.Has(h) {
debug.Log("blob %v not referenced", h)
@ -503,7 +509,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) {
}
})
return blobs
return blobs, err
}
// CountPacks returns the number of packs in the repository.
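As the "let index.Each() and pack.Size() return error on canceled context" commit above explains, callers now have to check whether iteration completed. A minimal hypothetical caller, assuming the post-change `Each` signature used in this hunk:

package example

import (
	"context"

	"github.com/restic/restic/internal/restic"
)

// countBlobs shows the enforced pattern: the error returned by Each() must be
// checked, so a canceled context can no longer be ignored silently.
func countBlobs(ctx context.Context, idx restic.MasterIndex) (int, error) {
	n := 0
	err := idx.Each(ctx, func(pb restic.PackedBlob) { n++ })
	if err != nil {
		return 0, err // typically context.Canceled
	}
	return n, nil
}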

View File

@ -180,7 +180,8 @@ func TestUnreferencedBlobs(t *testing.T) {
test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))
blobs := chkr.UnusedBlobs(context.TODO())
blobs, err := chkr.UnusedBlobs(context.TODO())
test.OK(t, err)
sort.Sort(blobs)
test.Equals(t, unusedBlobsBySnapshot, blobs)

View File

@ -43,7 +43,10 @@ func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) {
}
// unused blobs
blobs := chkr.UnusedBlobs(context.TODO())
blobs, err := chkr.UnusedBlobs(context.TODO())
if err != nil {
t.Error(err)
}
if len(blobs) > 0 {
t.Errorf("unused blobs found: %v", blobs)
}

View File

@ -3,41 +3,108 @@ package fs
import (
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
)
// ErrorHandler is used to report errors via callback
type ErrorHandler func(item string, err error) error
// VSSConfig holds extended options of windows volume shadow copy service.
type VSSConfig struct {
ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"`
ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"`
Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"`
Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"`
}
func init() {
if runtime.GOOS == "windows" {
options.Register("vss", VSSConfig{})
}
}
// NewVSSConfig returns a new VSSConfig with the default values filled in.
func NewVSSConfig() VSSConfig {
return VSSConfig{
Timeout: time.Second * 120,
}
}
// ParseVSSConfig parses a VSS extended options to VSSConfig struct.
func ParseVSSConfig(o options.Options) (VSSConfig, error) {
cfg := NewVSSConfig()
o = o.Extract("vss")
if err := o.Apply("vss", &cfg); err != nil {
return VSSConfig{}, err
}
return cfg, nil
}
// ErrorHandler is used to report errors via callback.
type ErrorHandler func(item string, err error)
// MessageHandler is used to report errors/messages via callbacks.
type MessageHandler func(msg string, args ...interface{})
// VolumeFilter is used to filter volumes by their mount point or GUID path.
type VolumeFilter func(volume string) bool
// LocalVss is a wrapper around the local file system which uses windows volume
// shadow copy service (VSS) in a transparent way.
type LocalVss struct {
FS
snapshots map[string]VssSnapshot
failedSnapshots map[string]struct{}
mutex sync.RWMutex
msgError ErrorHandler
msgMessage MessageHandler
snapshots map[string]VssSnapshot
failedSnapshots map[string]struct{}
mutex sync.RWMutex
msgError ErrorHandler
msgMessage MessageHandler
excludeAllMountPoints bool
excludeVolumes map[string]struct{}
timeout time.Duration
provider string
}
// statically ensure that LocalVss implements FS.
var _ FS = &LocalVss{}
// parseMountPoints tries to convert a semicolon-separated list of mount points
// to a map of lowercased volume GUID paths. Mount points already in volume
// GUID path format will be validated and normalized.
func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) {
if list == "" {
return
}
for _, s := range strings.Split(list, ";") {
if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil {
msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err))
} else {
if volumes == nil {
volumes = make(map[string]struct{})
}
volumes[strings.ToLower(v)] = struct{}{}
}
}
return
}
// NewLocalVss creates a new wrapper around the windows filesystem using volume
// shadow copy service to access locked files.
func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss {
func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss {
return &LocalVss{
FS: Local{},
snapshots: make(map[string]VssSnapshot),
failedSnapshots: make(map[string]struct{}),
msgError: msgError,
msgMessage: msgMessage,
FS: Local{},
snapshots: make(map[string]VssSnapshot),
failedSnapshots: make(map[string]struct{}),
msgError: msgError,
msgMessage: msgMessage,
excludeAllMountPoints: cfg.ExcludeAllMountPoints,
excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError),
timeout: cfg.Timeout,
provider: cfg.Provider,
}
}
@ -50,7 +117,7 @@ func (fs *LocalVss) DeleteSnapshots() {
for volumeName, snapshot := range fs.snapshots {
if err := snapshot.Delete(); err != nil {
_ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
activeSnapshots[volumeName] = snapshot
}
}
@ -78,12 +145,29 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(fs.snapshotPath(name))
}
// isMountPointExcluded reports whether the given mount point is excluded by the user.
func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool {
if fs.excludeVolumes == nil {
return false
}
volume, err := GetVolumeNameForVolumeMountPoint(mountPoint)
if err != nil {
fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err))
return false
}
_, ok := fs.excludeVolumes[strings.ToLower(volume)]
return ok
}
// snapshotPath returns the path inside a VSS snapshot if it already exists.
// If the path is not yet available as a snapshot, a snapshot is created.
// If creation of a snapshot fails the file's original path is returned as
// a fallback.
func (fs *LocalVss) snapshotPath(path string) string {
fixPath := fixpath(path)
if strings.HasPrefix(fixPath, `\\?\UNC\`) {
@ -114,23 +198,36 @@ func (fs *LocalVss) snapshotPath(path string) string {
if !snapshotExists && !snapshotFailed {
vssVolume := volumeNameLower + string(filepath.Separator)
fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume)
if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil {
_ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s",
vssVolume, err))
if fs.isMountPointExcluded(vssVolume) {
fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume)
fs.failedSnapshots[volumeNameLower] = struct{}{}
} else {
fs.snapshots[volumeNameLower] = snapshot
fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume)
if len(snapshot.mountPointInfo) > 0 {
fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume)
for mp, mpInfo := range snapshot.mountPointInfo {
info := ""
if !mpInfo.IsSnapshotted() {
info = " (not snapshotted)"
fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume)
var filter VolumeFilter
if !fs.excludeAllMountPoints {
filter = func(volume string) bool {
return !fs.isMountPointExcluded(volume)
}
}
if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil {
fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s",
vssVolume, err))
fs.failedSnapshots[volumeNameLower] = struct{}{}
} else {
fs.snapshots[volumeNameLower] = snapshot
fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume)
if len(snapshot.mountPointInfo) > 0 {
fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume)
for mp, mpInfo := range snapshot.mountPointInfo {
info := ""
if !mpInfo.IsSnapshotted() {
info = " (not snapshotted)"
}
fs.msgMessage(" - %s%s\n", mp, info)
}
fs.msgMessage(" - %s%s\n", mp, info)
}
}
}
@ -173,9 +270,8 @@ func (fs *LocalVss) snapshotPath(path string) string {
snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(),
strings.TrimPrefix(fixPath, volumeName))
if snapshotPath == snapshot.GetSnapshotDeviceObject() {
snapshotPath += string(filepath.Separator)
}
} else {
// no snapshot is available for the requested path:
// -> try to backup without a snapshot

View File

@ -0,0 +1,287 @@
// +build windows
package fs
import (
"fmt"
"regexp"
"strings"
"testing"
"time"
ole "github.com/go-ole/go-ole"
"github.com/restic/restic/internal/options"
)
func matchStrings(ptrs []string, strs []string) bool {
if len(ptrs) != len(strs) {
return false
}
for i, p := range ptrs {
if p == "" {
return false
}
matched, err := regexp.MatchString(p, strs[i])
if err != nil {
panic(err)
}
if !matched {
return false
}
}
return true
}
func matchMap(strs []string, m map[string]struct{}) bool {
if len(strs) != len(m) {
return false
}
for _, s := range strs {
if _, ok := m[s]; !ok {
return false
}
}
return true
}
func TestVSSConfig(t *testing.T) {
type config struct {
excludeAllMountPoints bool
timeout time.Duration
provider string
}
setTests := []struct {
input options.Options
output config
}{
{
options.Options{
"vss.timeout": "6h38m42s",
"vss.provider": "Ms",
},
config{
timeout: 23922000000000,
provider: "Ms",
},
},
{
options.Options{
"vss.excludeallmountpoints": "t",
"vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}",
},
config{
excludeAllMountPoints: true,
timeout: 120000000000,
provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}",
},
},
{
options.Options{
"vss.excludeallmountpoints": "0",
"vss.excludevolumes": "",
"vss.timeout": "120s",
"vss.provider": "Microsoft Software Shadow Copy provider 1.0",
},
config{
timeout: 120000000000,
provider: "Microsoft Software Shadow Copy provider 1.0",
},
},
}
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
cfg, err := ParseVSSConfig(test.input)
if err != nil {
t.Fatal(err)
}
errorHandler := func(item string, err error) {
t.Fatalf("unexpected error (%v)", err)
}
messageHandler := func(msg string, args ...interface{}) {
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
}
dst := NewLocalVss(errorHandler, messageHandler, cfg)
if dst.excludeAllMountPoints != test.output.excludeAllMountPoints ||
dst.excludeVolumes != nil || dst.timeout != test.output.timeout ||
dst.provider != test.output.provider {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst)
}
})
}
}
func TestParseMountPoints(t *testing.T) {
volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`)
// It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling
// GetVolumeNameForVolumeMountPoint itself, but we have a restricted test environment:
// we cannot manage volumes and can only be sure that the mount point C:\ exists.
sysVolume, err := GetVolumeNameForVolumeMountPoint("C:")
if err != nil {
t.Fatal(err)
}
// We don't know a valid volume GUID path for C:\, but we'll at least check its format
if !volumeMatch.MatchString(sysVolume) {
t.Fatalf("invalid volume GUID path: %s", sysVolume)
}
sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1])
sysVolumeMatch := strings.ToLower(sysVolume)
type check struct {
volume string
result bool
}
setTests := []struct {
input options.Options
output []string
checks []check
errors []string
}{
{
options.Options{
"vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated,
},
[]string{
sysVolumeMatch,
},
[]check{
{`c:\`, true},
{`c:`, true},
{sysVolume, true},
{sysVolumeMutated, true},
},
[]string{},
},
{
options.Options{
"vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`,
},
[]string{
sysVolumeMatch,
},
[]check{
{`c:\windows\`, false},
{`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false},
{`c:`, true},
{``, false},
},
[]string{
`failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`,
`failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`,
`failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
`failed to get volume from mount point \[c:\\windows\\\]:.*`,
`failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
`failed to get volume from mount point \[\]:.*`,
},
},
}
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
cfg, err := ParseVSSConfig(test.input)
if err != nil {
t.Fatal(err)
}
var log []string
errorHandler := func(item string, err error) {
log = append(log, strings.TrimSpace(err.Error()))
}
messageHandler := func(msg string, args ...interface{}) {
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
}
dst := NewLocalVss(errorHandler, messageHandler, cfg)
if !matchMap(test.output, dst.excludeVolumes) {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v",
test.output, dst.excludeVolumes)
}
for _, c := range test.checks {
if dst.isMountPointExcluded(c.volume) != c.result {
t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result)
}
}
if !matchStrings(test.errors, log) {
t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log)
}
})
}
}
func TestParseProvider(t *testing.T) {
msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}")
setTests := []struct {
provider string
id *ole.GUID
result string
}{
{
"",
ole.IID_NULL,
"",
},
{
"mS",
msProvider,
"",
},
{
"{B5946137-7b9f-4925-Af80-51abD60b20d5}",
msProvider,
"",
},
{
"Microsoft Software Shadow Copy provider 1.0",
msProvider,
"",
},
{
"{04560982-3d7d-4bbc-84f7-0712f833a28f}",
nil,
`invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`,
},
{
"non-existent provider",
nil,
`invalid VSS provider "non-existent provider"`,
},
}
_ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
id, err := getProviderID(test.provider)
if err != nil && id != nil {
t.Fatalf("err!=nil but id=%v", id)
}
if test.result != "" || err != nil {
var result string
if err != nil {
result = err.Error()
}
matched, err := regexp.MatchString(test.result, result)
if err != nil {
panic(err)
}
if !matched || test.result == "" {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result)
}
} else if !ole.IsEqualGUID(id, test.id) {
t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String())
}
})
}
}

View File

@ -4,6 +4,8 @@
package fs
import (
"time"
"github.com/restic/restic/internal/errors"
)
@ -31,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error {
return errors.New("VSS snapshots are only supported on windows")
}
// GetVolumeNameForVolumeMountPoint normalizes the mount point parameter
// and calls the equivalent Windows API.
func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
return mountPoint, nil
}
// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
// finish within the timeout an error is returned.
func NewVssSnapshot(_ string,
_ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) {
return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows")
}

View File

@ -9,6 +9,7 @@ import (
"runtime"
"strings"
"syscall"
"time"
"unsafe"
ole "github.com/go-ole/go-ole"
@ -20,6 +21,7 @@ import (
type HRESULT uint
// HRESULT constant values necessary for using VSS api.
//nolint:golint
const (
S_OK HRESULT = 0x00000000
E_ACCESSDENIED HRESULT = 0x80070005
@ -255,6 +257,7 @@ type IVssBackupComponents struct {
}
// IVssBackupComponentsVTable is the vtable for IVssBackupComponents.
// nolint:structcheck
type IVssBackupComponentsVTable struct {
ole.IUnknownVtbl
getWriterComponentsCount uintptr
@ -364,7 +367,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync(
}
// IsVolumeSupported calls the equivalent VSS api.
func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) {
volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
if err != nil {
panic(err)
@ -374,7 +377,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err
var result uintptr
if runtime.GOARCH == "386" {
id := (*[4]uintptr)(unsafe.Pointer(providerID))
result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7,
uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3],
@ -382,7 +385,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err
0)
} else {
result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4,
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)),
uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0,
0)
}
@ -408,24 +411,24 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) {
}
// AddToSnapshotSet calls the equivalent VSS api.
func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error {
volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
if err != nil {
panic(err)
}
var result uintptr
if runtime.GOARCH == "386" {
id := (*[4]uintptr)(unsafe.Pointer(providerID))
result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7,
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
} else {
result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4,
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
}
return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result))
@ -478,9 +481,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) {
// DeleteSnapshots calls the equivalent VSS api.
func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) {
var deletedSnapshots int32
var nondeletedSnapshotID ole.GUID
var result uintptr
if runtime.GOARCH == "386" {
id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
@ -504,7 +507,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol
// GetSnapshotProperties calls the equivalent VSS api.
func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID,
properties *VssSnapshotProperties) error {
var result uintptr
if runtime.GOARCH == "386" {
id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
@ -527,11 +530,18 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error {
if err != nil {
return err
}
// this function always succeeds and returns no value
_, _, _ = proc.Call(uintptr(unsafe.Pointer(properties)))
return nil
}
func vssFreeProviderProperties(p *VssProviderProperties) {
ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName)))
p.providerName = nil
ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion)))
p.providerVersion = nil
}
// BackupComplete calls the equivalent VSS api.
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) {
var oleIUnknown *ole.IUnknown
@ -543,6 +553,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) {
}
// VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api.
// nolint:structcheck
type VssSnapshotProperties struct {
snapshotID ole.GUID
snapshotSetID ole.GUID
@ -559,6 +570,17 @@ type VssSnapshotProperties struct {
status uint
}
// VssProviderProperties defines the properties of a VSS provider as part of the VSS api.
// nolint:structcheck
type VssProviderProperties struct {
providerID ole.GUID
providerName *uint16
providerType uint32
providerVersion *uint16
providerVersionID ole.GUID
classID ole.GUID
}
// GetSnapshotDeviceObject returns root path to access the snapshot files
// and folders.
func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string {
@ -617,8 +639,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) {
// WaitUntilAsyncFinished waits until either the async call is finished or
// the given timeout is reached.
func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error {
const maxTimeout = 2147483647 * time.Millisecond
if timeout > maxTimeout {
timeout = maxTimeout
}
hresult := vssAsync.Wait(uint32(timeout.Milliseconds()))
err := newVssErrorIfResultNotOK("Wait() failed", hresult)
if err != nil {
vssAsync.Cancel()
@ -651,6 +678,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error {
return nil
}
// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin.
var (
UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}")
CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}")
)
// IVSSAdmin VSS api interface.
type IVSSAdmin struct {
ole.IUnknown
}
// IVSSAdminVTable is the vtable for IVSSAdmin.
// nolint:structcheck
type IVSSAdminVTable struct {
ole.IUnknownVtbl
registerProvider uintptr
unregisterProvider uintptr
queryProviders uintptr
abortAllSnapshotsInProgress uintptr
}
// getVTable returns the vtable for IVSSAdmin.
func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable {
return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable))
}
// QueryProviders calls the equivalent VSS api.
func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) {
var enum *IVssEnumObject
result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2,
uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0)
return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result))
}
// IVssEnumObject VSS api interface.
type IVssEnumObject struct {
ole.IUnknown
}
// IVssEnumObjectVTable is the vtable for IVssEnumObject.
// nolint:structcheck
type IVssEnumObjectVTable struct {
ole.IUnknownVtbl
next uintptr
skip uintptr
reset uintptr
clone uintptr
}
// getVTable returns the vtable for IVssEnumObject.
func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable {
return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable))
}
// Next calls the equivalent VSS api.
func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) {
var fetched uint32
result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4,
uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props),
uintptr(unsafe.Pointer(&fetched)), 0, 0)
if result == 1 {
return uint(fetched), nil
}
return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result))
}
// MountPoint wraps all information of a snapshot of a mountpoint on a volume.
type MountPoint struct {
isSnapshotted bool
@ -677,7 +773,7 @@ type VssSnapshot struct {
snapshotProperties VssSnapshotProperties
snapshotDeviceObject string
mountPointInfo map[string]MountPoint
timeout time.Duration
}
// GetSnapshotDeviceObject returns root path to access the snapshot files
@ -694,7 +790,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) {
}
// ensure COM is initialized before use
if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
// CoInitializeEx returns 1 if COM is already initialized
if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 {
return nil, err
}
}
var oleIUnknown *ole.IUnknown
result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown)))
@ -727,12 +828,34 @@ func HasSufficientPrivilegesForVSS() error {
return err
}
// GetVolumeNameForVolumeMountPoint normalizes the mount point parameter
// and calls the equivalent Windows API.
func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator {
mountPoint += string(filepath.Separator)
}
mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint)
if err != nil {
return mountPoint, err
}
// A reasonable size for the buffer to accommodate the largest possible
// volume GUID path is 50 characters.
volumeNameBuffer := make([]uint16, 50)
if err := windows.GetVolumeNameForVolumeMountPoint(
mountPointPointer, &volumeNameBuffer[0], 50); err != nil {
return mountPoint, err
}
return syscall.UTF16ToString(volumeNameBuffer), nil
}
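A small sketch of the normalization performed here (the GUID below is made up): a missing trailing separator is appended before the Windows API is called, so both spellings of a mount point resolve to the same volume GUID path.
func exampleVolumeName() {
a, _ := GetVolumeNameForVolumeMountPoint(`C:`)
b, _ := GetVolumeNameForVolumeMountPoint(`C:\`)
// both calls return the same volume GUID path, e.g.
// \\?\Volume{0a5ebabe-0000-0000-0000-100000000000}\
_, _ = a, b
}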
// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
// finish within the timeout an error is returned.
func NewVssSnapshot(provider string,
volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) {
is64Bit, err := isRunningOn64BitWindows()
if err != nil {
return VssSnapshot{}, newVssTextError(fmt.Sprintf(
"Failed to detect windows architecture: %s", err.Error()))
@ -744,7 +867,7 @@ func NewVssSnapshot(
runtime.GOARCH))
}
deadline := time.Now().Add(timeout)
oleIUnknown, err := initializeVssCOMInterface()
if oleIUnknown != nil {
@ -778,6 +901,12 @@ func NewVssSnapshot(
iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface))
providerID, err := getProviderID(provider)
if err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
if err := iVssBackupComponents.InitializeForBackup(); err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
@ -796,13 +925,13 @@ func NewVssSnapshot(
}
err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata,
"GatherWriterMetadata", timeoutInMillis)
"GatherWriterMetadata", deadline)
if err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
} else if !isSupported {
@ -817,57 +946,66 @@ func NewVssSnapshot(
return VssSnapshot{}, err
}
if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
mountPointInfo := make(map[string]MountPoint)
// if filter==nil just don't process mount points for this volume at all
if filter != nil {
mountPoints, err := enumerateMountedFolders(volume)
if err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, newVssTextError(fmt.Sprintf(
"failed to enumerate mount points for volume %s: %s", volume, err))
}
for _, mountPoint := range mountPoints {
// ensure every mountpoint is available even without a valid
// snapshot because we need to consider this when backing up files
mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false}
if !filter(mountPoint) {
continue
} else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil {
continue
} else if !isSupported {
continue
}
var mountPointSnapshotSetID ole.GUID
err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID)
if err != nil {
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
mountPointInfo[mountPoint] = MountPoint{
isSnapshotted: true,
snapshotSetID: mountPointSnapshotSetID,
}
}
}
err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup",
deadline)
if err != nil {
// After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS
// instance for proper cleanup.
// It is not necessary to call BackupComplete before releasing the VSS instance afterwards.
_ = iVssBackupComponents.AbortBackup()
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet",
deadline)
if err != nil {
_ = iVssBackupComponents.AbortBackup()
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
@ -875,13 +1013,12 @@ func NewVssSnapshot(
var snapshotProperties VssSnapshotProperties
err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties)
if err != nil {
_ = iVssBackupComponents.AbortBackup()
iVssBackupComponents.Release()
return VssSnapshot{}, err
}
for mountPoint, info := range mountPointInfo {
if !info.isSnapshotted {
continue
}
@ -900,8 +1037,10 @@ func NewVssSnapshot(
mountPointInfo[mountPoint] = info
}
return VssSnapshot{
iVssBackupComponents, snapshotSetID, snapshotProperties,
snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline),
}, nil
}
// Delete deletes the created snapshot.
@ -922,15 +1061,17 @@ func (p *VssSnapshot) Delete() error {
if p.iVssBackupComponents != nil {
defer p.iVssBackupComponents.Release()
deadline := time.Now().Add(p.timeout)
err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete",
deadline)
if err != nil {
return err
}
if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil {
err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error()))
_ = p.iVssBackupComponents.AbortBackup()
if err != nil {
return err
}
@ -940,12 +1081,61 @@ func (p *VssSnapshot) Delete() error {
return nil
}
func getProviderID(provider string) (*ole.GUID, error) {
comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN)
if err != nil {
return nil, err
}
defer comInterface.Release()
vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface))
providerLower := strings.ToLower(provider)
switch providerLower {
case "":
return ole.IID_NULL, nil
case "ms":
return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil
}
enum, err := vssAdmin.QueryProviders()
if err != nil {
return nil, err
}
defer enum.Release()
id := ole.NewGUID(provider)
var props struct {
objectType uint32
provider VssProviderProperties
}
for {
count, err := enum.Next(1, unsafe.Pointer(&props))
if err != nil {
return nil, err
}
if count < 1 {
return nil, errors.Errorf(`invalid VSS provider "%s"`, provider)
}
name := ole.UTF16PtrToString(props.provider.providerName)
vssFreeProviderProperties(&props.provider)
if id != nil && *id == props.provider.providerID ||
id == nil && providerLower == strings.ToLower(name) {
return &props.provider.providerID, nil
}
}
}
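For illustration, a hedged sketch of the spellings the vss.provider option accepts per getProviderID: an empty string selects the system default, "ms" the Microsoft software provider, and anything else is matched against the registered providers by GUID or display name (case-insensitive). The helper name is made up, and the sketch assumes COM is initialized and the Microsoft provider is registered, as in TestParseProvider.
func exampleProviderLookups() error {
for _, p := range []string{
"", // system default provider (IID_NULL)
"ms", // shorthand for the Microsoft software provider
"{b5946137-7b9f-4925-af80-51abd60b20d5}", // explicit GUID
"Microsoft Software Shadow Copy provider 1.0", // display name
} {
if _, err := getProviderID(p); err != nil {
return err
}
}
return nil
}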
// asyncCallFunc is the callback type for callAsyncFunctionAndWait.
type asyncCallFunc func() (*IVSSAsync, error)
// callAsyncFunctionAndWait calls an async functions and waits for it to either
// finish or timeout.
func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error {
iVssAsync, err := function()
if err != nil {
return err
@ -955,7 +1145,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill
return newVssTextError(fmt.Sprintf("%s() returned nil", name))
}
timeout := time.Until(deadline)
if timeout <= 0 {
return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name))
}
err = iVssAsync.WaitUntilAsyncFinished(timeout)
iVssAsync.Release()
return err
}
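A minimal sketch of the deadline semantics introduced here (the helper name is hypothetical): consecutive async VSS calls share one deadline, so the configured vss.timeout bounds the whole sequence instead of restarting for every call.
func exampleSharedDeadline(vss *IVssBackupComponents, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
if err := callAsyncFunctionAndWait(vss.GatherWriterMetadata, "GatherWriterMetadata", deadline); err != nil {
return err
}
// a later call only receives the remaining budget via time.Until(deadline)
return callAsyncFunctionAndWait(vss.PrepareForBackup, "PrepareForBackup", deadline)
}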
@ -1036,6 +1231,7 @@ func enumerateMountedFolders(volume string) ([]string, error) {
return mountedFolders, nil
}
// nolint:errcheck
defer windows.FindVolumeMountPointClose(handle)
volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer)

View File

@ -218,7 +218,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
// Each passes all blobs known to the index to the callback fn. This blocks any
// modification of the index.
func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
idx.m.Lock()
defer idx.m.Unlock()
@ -232,6 +232,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) {
return true
})
}
return ctx.Err()
}
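A short usage sketch (the helper name is made up): since Each now reports ctx.Err(), a caller has to check the returned error or risk treating a truncated iteration as a complete one.
func exampleCountBlobs(ctx context.Context, idx *Index) (int, error) {
count := 0
err := idx.Each(ctx, func(pb restic.PackedBlob) {
count++
})
return count, err
}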
type EachByPackResult struct {

View File

@ -339,7 +339,7 @@ func TestIndexUnserialize(t *testing.T) {
rtest.Equals(t, oldIdx, idx.Supersedes())
blobs := listPack(t, idx, exampleLookupTest.packID)
if len(blobs) != len(exampleLookupTest.blobs) {
t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
}
@ -356,12 +356,12 @@ func TestIndexUnserialize(t *testing.T) {
}
}
func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
if pb.PackID.Equal(id) {
pbs = append(pbs, pb)
}
}))
return pbs
}

View File

@ -223,13 +223,16 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index {
// Each runs fn on all blobs known to the index. When the context is cancelled,
// the index iteration returns immediately. This blocks any modification of the index.
func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
for _, idx := range mi.idx {
if err := idx.Each(ctx, fn); err != nil {
return err
}
}
return nil
}
// MergeFinalIndexes merges all final indexes together.
@ -320,6 +323,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude
newIndex = NewIndex()
}
}
if wgCtx.Err() != nil {
return wgCtx.Err()
}
}
err := newIndex.AddToSupersedes(extraObsolete...)
@ -426,10 +432,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan
defer close(out)
// only resort a part of the index to keep the memory overhead bounded
for i := byte(0); i < 16; i++ {
if ctx.Err() != nil {
return
}
packBlob := make(map[restic.ID][]restic.Blob)
for pack := range packs {
if pack[0]&0xf == i {
@ -439,11 +441,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan
if len(packBlob) == 0 {
continue
}
err := mi.Each(ctx, func(pb restic.PackedBlob) {
if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i {
packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob)
}
})
if err != nil {
return
}
// pass on packs
for packID, pbs := range packBlob {

View File

@ -166,9 +166,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
rtest.Equals(t, 1, idxCount)
blobCount := 0
rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
blobCount++
}))
rtest.Equals(t, 2, blobCount)
blobs := mIdx.Lookup(bhInIdx1)
@ -198,9 +198,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
blobCount = 0
rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
blobCount++
}))
rtest.Equals(t, 2, blobCount)
}
@ -319,9 +319,9 @@ func BenchmarkMasterIndexEach(b *testing.B) {
for i := 0; i < b.N; i++ {
entries := 0
rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) {
entries++
}))
}
}

View File

@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int {
// If onlyHdr is set to true, only the size of the header is returned
// Note that this function only gives correct sizes, if there are no
// duplicates in the index.
func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) {
packSize := make(map[restic.ID]int64)
err := mi.Each(ctx, func(blob restic.PackedBlob) {
size, ok := packSize[blob.PackID]
if !ok {
size = headerSize
@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I
packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob))
})
return packSize, err
}
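A hedged usage sketch (the helper name is hypothetical): with the new signature the computed pack sizes are only trustworthy when the returned error is nil.
func exampleTotalPackSize(ctx context.Context, mi restic.MasterIndex) (int64, error) {
sizes, err := Size(ctx, mi, false)
if err != nil {
return 0, err
}
var total int64
for _, s := range sizes {
total += s
}
return total, nil
}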

View File

@ -124,12 +124,15 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g
blobCount := keepBlobs.Len()
// when repacking, we do not want to keep blobs which are
// already contained in kept packs, so delete them from keepBlobs
err := repo.Index().Each(ctx, func(blob restic.PackedBlob) {
if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) {
return
}
keepBlobs.Delete(blob.BlobHandle)
})
if err != nil {
return nil, err
}
if keepBlobs.Len() < blobCount/2 {
// replace with copy to shrink map to necessary size if there's a chance to benefit
@ -152,7 +155,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// iterate over all blobs in index to find out which blobs are duplicates
// The counter in usedBlobs describes how many instances of the blob exist in the repository index
// Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist
err := idx.Each(ctx, func(blob restic.PackedBlob) {
bh := blob.BlobHandle
count, ok := usedBlobs[bh]
if ok {
@ -166,6 +169,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
usedBlobs[bh] = count
}
})
if err != nil {
return nil, nil, err
}
// Check if all used blobs have been found in index
missingBlobs := restic.NewBlobSet()
@ -188,14 +194,18 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
indexPack := make(map[restic.ID]packInfo)
// save computed pack header size
sz, err := pack.Size(ctx, idx, true)
if err != nil {
return nil, nil, err
}
for pid, hdrSize := range sz {
// initialize tpe with NumBlobTypes to indicate it's not set
indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)}
}
hasDuplicates := false
// iterate over all blobs in index to generate packInfo
err = idx.Each(ctx, func(blob restic.PackedBlob) {
ip := indexPack[blob.PackID]
// Set blob type if not yet set
@ -240,6 +250,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// update indexPack
indexPack[blob.PackID] = ip
})
if err != nil {
return nil, nil, err
}
// if duplicate blobs exist, those will be set to either "used" or "unused":
// - mark only one occurrence of duplicate blobs as used
@ -247,7 +260,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
if hasDuplicates {
// iterate again over all blobs in index (this is pretty cheap, all in-mem)
err = idx.Each(ctx, func(blob restic.PackedBlob) {
bh := blob.BlobHandle
count, ok := usedBlobs[bh]
// skip non-duplicate, aka. normal blobs
@ -285,6 +298,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// update indexPack
indexPack[blob.PackID] = ip
})
if err != nil {
return nil, nil, err
}
}
// Sanity check. If no duplicates exist, all blobs have value 1. After handling
@ -528,6 +544,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("deleting unreferenced packs\n")
_ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer)
}
if ctx.Err() != nil {
return ctx.Err()
}
if len(plan.repackPacks) != 0 {
printer.P("repacking packs\n")
@ -578,6 +597,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("removing %d old packs\n", len(plan.removePacks))
_ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer)
}
if ctx.Err() != nil {
return ctx.Err()
}
if plan.opts.UnsafeRecovery {
err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer)

View File

@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
return wgCtx.Err()
}
}
return wgCtx.Err()
})
worker := func() error {

View File

@ -54,7 +54,10 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions,
if err != nil {
return err
}
packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false)
if err != nil {
return err
}
}
printer.P("getting pack files to read...\n")

View File

@ -17,7 +17,7 @@ import (
func listBlobs(repo restic.Repository) restic.BlobSet {
blobs := restic.NewBlobSet()
_ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
blobs.Insert(pb.BlobHandle)
})
return blobs

View File

@ -704,15 +704,21 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
defer cancel()
invalidIndex := false
err := r.idx.Each(ctx, func(blob restic.PackedBlob) {
if blob.IsCompressed() {
invalidIndex = true
}
})
if err != nil {
return err
}
if invalidIndex {
return errors.New("index uses feature not supported by repository version 1")
}
}
if ctx.Err() != nil {
return ctx.Err()
}
// remove index files from the cache which have been removed in the repo
return r.prepareCache()

View File

@ -370,13 +370,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) {
idx, err := loadIndex(context.TODO(), repo, id)
rtest.OK(t, err)
rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
if _, ok := packEntries[pb.PackID]; !ok {
packEntries[pb.PackID] = make(map[restic.ID]struct{})
}
packEntries[pb.PackID][id] = struct{}{}
}))
return nil
})
if err != nil {

View File

@ -103,8 +103,8 @@ type MasterIndex interface {
Lookup(BlobHandle) []PackedBlob
// Each runs fn on all blobs known to the index. When the context is cancelled,
// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index.
Each(ctx context.Context, fn func(PackedBlob)) error
ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs
Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error