From c3212ab6a65840b0bc89ba216707bc80656a272c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 May 2023 23:26:13 +0200 Subject: [PATCH 01/12] test: use standard logging methods from testing for the test helpers Use the logging methods from testing.TB to make use of tb.Helper(). This allows the tests to log the filename and line number in which the test helper was called. Previously the test helper was logged which is rarely useful. --- cmd/restic/integration_test.go | 2 ++ internal/test/helpers.go | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 11c048f90..2be089b23 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -31,6 +31,7 @@ import ( ) func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { + t.Helper() IDs := restic.IDs{} sc := bufio.NewScanner(rd) @@ -148,6 +149,7 @@ func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts G } func testRunCheck(t testing.TB, gopts GlobalOptions) { + t.Helper() opts := CheckOptions{ ReadData: true, CheckUnused: true, diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 93178ae10..65e3e36ec 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -3,13 +3,11 @@ package test import ( "compress/bzip2" "compress/gzip" - "fmt" "io" "os" "os/exec" "path/filepath" "reflect" - "runtime" "testing" "github.com/restic/restic/internal/errors" @@ -19,30 +17,28 @@ import ( // Assert fails the test if the condition is false. func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + tb.Helper() if !condition { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) - tb.FailNow() + tb.Fatalf("\033[31m"+msg+"\033[39m\n\n", v...) } } // OK fails the test if an err is not nil. 
func OK(tb testing.TB, err error) { + tb.Helper() if err != nil { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err) - tb.FailNow() + tb.Fatalf("\033[31munexpected error: %+v\033[39m\n\n", err) } } // OKs fails the test if any error from errs is not nil. func OKs(tb testing.TB, errs []error) { + tb.Helper() errFound := false for _, err := range errs { if err != nil { errFound = true - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.Logf("\033[31munexpected error: %+v\033[39m\n\n", err.Error()) } } if errFound { @@ -52,10 +48,9 @@ func OKs(tb testing.TB, errs []error) { // Equals fails the test if exp is not equal to act. func Equals(tb testing.TB, exp, act interface{}) { + tb.Helper() if !reflect.DeepEqual(exp, act) { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() + tb.Fatalf("\033[31m\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act) } } @@ -92,6 +87,7 @@ func Random(seed, count int) []byte { // SetupTarTestFixture extracts the tarFile to outputDir. func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) { + t.Helper() input, err := os.Open(tarFile) OK(t, err) defer func() { @@ -130,6 +126,7 @@ func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) { // Env creates a test environment and extracts the repository fixture. // Returned is the repo path and a cleanup function. func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) { + t.Helper() tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-env-") OK(t, err) @@ -159,6 +156,7 @@ func isFile(fi os.FileInfo) bool { // This is mainly used for tests on Windows, which is unable to delete a file // set read-only. 
func ResetReadOnly(t testing.TB, dir string) { + t.Helper() err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { if fi == nil { return err @@ -183,6 +181,7 @@ func ResetReadOnly(t testing.TB, dir string) { // RemoveAll recursively resets the read-only flag of all files and dirs and // afterwards uses os.RemoveAll() to remove the path. func RemoveAll(t testing.TB, path string) { + t.Helper() ResetReadOnly(t, path) err := os.RemoveAll(path) if errors.Is(err, os.ErrNotExist) { @@ -194,6 +193,7 @@ func RemoveAll(t testing.TB, path string) { // TempDir returns a temporary directory that is removed by t.Cleanup, // except if TestCleanupTempDirs is set to false. func TempDir(t testing.TB) string { + t.Helper() tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-") if err != nil { t.Fatal(err) From 419e6f26b1b022f669160ab8f8f0096f54d6ec48 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 May 2023 23:51:05 +0200 Subject: [PATCH 02/12] tests: Simplify checks that a specific number of snapshots exists --- cmd/restic/integration_test.go | 127 ++++++++++----------------------- 1 file changed, 37 insertions(+), 90 deletions(-) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 2be089b23..00b056bca 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -332,18 +332,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) { // first backup testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, - "expected one snapshot, got %v", snapshotIDs) + snapshotIDs := testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) // second backup, implicit incremental testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 2, 
- "expected two snapshots, got %v", snapshotIDs) + snapshotIDs = testListSnapshots(t, env.gopts, 2) stat2 := dirStats(env.repo) if stat2.size > stat1.size+stat1.size/10 { @@ -355,9 +351,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { // third backup, explicit incremental opts.Parent = snapshotIDs[0].String() testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 3, - "expected three snapshots, got %v", snapshotIDs) + snapshotIDs = testListSnapshots(t, env.gopts, 3) stat3 := dirStats(env.repo) if stat3.size > stat1.size+stat1.size/10 { @@ -386,9 +380,7 @@ func TestBackupWithRelativePath(t *testing.T) { // first backup testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - firstSnapshotID := snapshotIDs[0] + firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0] // second backup, implicit incremental testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) @@ -408,14 +400,11 @@ func TestBackupParentSelection(t *testing.T) { // first backup testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - firstSnapshotID := snapshotIDs[0] + firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0] // second backup, sibling path testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) + testListSnapshots(t, env.gopts, 2) // third backup, incremental for the first backup testRunBackup(t, 
filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) @@ -436,9 +425,7 @@ func TestDryRunBackup(t *testing.T) { // dry run before first backup testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 0, - "expected no snapshot, got %v", snapshotIDs) + snapshotIDs := testListSnapshots(t, env.gopts, 0) packIDs := testRunList(t, "packs", env.gopts) rtest.Assert(t, len(packIDs) == 0, "expected no data, got %v", snapshotIDs) @@ -448,13 +435,13 @@ func TestDryRunBackup(t *testing.T) { // first backup testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) + snapshotIDs = testListSnapshots(t, env.gopts, 1) packIDs = testRunList(t, "packs", env.gopts) indexIDs = testRunList(t, "index", env.gopts) // dry run between backups testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDsAfter := testRunList(t, "snapshots", env.gopts) + snapshotIDsAfter := testListSnapshots(t, env.gopts, 1) rtest.Equals(t, snapshotIDs, snapshotIDsAfter) dataIDsAfter := testRunList(t, "packs", env.gopts) rtest.Equals(t, packIDs, dataIDsAfter) @@ -463,13 +450,13 @@ func TestDryRunBackup(t *testing.T) { // second backup, implicit incremental testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) + snapshotIDs = testListSnapshots(t, env.gopts, 2) packIDs = testRunList(t, "packs", env.gopts) indexIDs = testRunList(t, "index", env.gopts) // another dry run testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDsAfter = testRunList(t, "snapshots", env.gopts) + snapshotIDsAfter = testListSnapshots(t, env.gopts, 2) rtest.Equals(t, snapshotIDs, snapshotIDsAfter) dataIDsAfter = testRunList(t, "packs", env.gopts) 
rtest.Equals(t, packIDs, dataIDsAfter) @@ -717,9 +704,7 @@ func TestBackupErrors(t *testing.T) { err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts) rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.") rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned") - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, - "expected one snapshot, got %v", snapshotIDs) + testListSnapshots(t, env.gopts, 1) } const ( @@ -853,12 +838,10 @@ func TestCopy(t *testing.T) { testRunInit(t, env2.gopts) testRunCopy(t, env.gopts, env2.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts) + snapshotIDs := testListSnapshots(t, env.gopts, 3) + copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3) // Check that the copies size seems reasonable - rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v", - len(snapshotIDs), len(copiedSnapshotIDs)) stat := dirStats(env.repo) stat2 := dirStats(env2.repo) sizeDiff := int64(stat.size) - int64(stat2.size) @@ -911,36 +894,28 @@ func TestCopyIncremental(t *testing.T) { testRunInit(t, env2.gopts) testRunCopy(t, env.gopts, env2.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts) + testListSnapshots(t, env.gopts, 2) + testListSnapshots(t, env2.gopts, 2) // Check that the copies size seems reasonable testRunCheck(t, env2.gopts) - rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v", - len(snapshotIDs), len(copiedSnapshotIDs)) // check that no snapshots are copied, as there are no new ones testRunCopy(t, env.gopts, env2.gopts) testRunCheck(t, env2.gopts) - copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts) - rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found 
%v", - len(snapshotIDs), len(copiedSnapshotIDs)) + testListSnapshots(t, env2.gopts, 2) // check that only new snapshots are copied testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) testRunCopy(t, env.gopts, env2.gopts) testRunCheck(t, env2.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts) - rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v", - len(snapshotIDs), len(copiedSnapshotIDs)) + testListSnapshots(t, env.gopts, 3) + testListSnapshots(t, env2.gopts, 3) // also test the reverse direction testRunCopy(t, env2.gopts, env.gopts) testRunCheck(t, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v", - len(copiedSnapshotIDs), len(snapshotIDs)) + testListSnapshots(t, env.gopts, 3) } func TestCopyUnstableJSON(t *testing.T) { @@ -956,10 +931,7 @@ func TestCopyUnstableJSON(t *testing.T) { testRunInit(t, env2.gopts) testRunCopy(t, env.gopts, env2.gopts) testRunCheck(t, env2.gopts) - - copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts) - rtest.Assert(t, 1 == len(copiedSnapshotIDs), "still expected %v snapshot, found %v", - 1, len(copiedSnapshotIDs)) + testListSnapshots(t, env2.gopts, 1) } func TestInitCopyChunkerParams(t *testing.T) { @@ -1254,7 +1226,7 @@ func TestRestoreFilter(t *testing.T) { testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) - snapshotID := testRunList(t, "snapshots", env.gopts)[0] + snapshotID := testListSnapshots(t, env.gopts, 1)[0] // no restore filter should restore all files testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) @@ -1376,9 +1348,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) { datafile := filepath.Join("testdata", 
"repo-restore-permissions-test.tar.gz") rtest.SetupTarTestFixture(t, env.base, datafile) - snapshots := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshots) > 0, - "no snapshots found in repo (%v)", datafile) + snapshots := testListSnapshots(t, env.gopts, 1) globalOptions.stderr = io.Discard defer func() { @@ -1424,7 +1394,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) - snapshotID := testRunList(t, "snapshots", env.gopts)[0] + snapshotID := testListSnapshots(t, env.gopts, 1)[0] // restore with filter "*.ext", this should restore "file.ext", but // since the directories are ignored and only created because of @@ -1648,11 +1618,7 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunCheck(t, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - if len(snapshotIDs) == 0 { - t.Fatalf("found no snapshots") - } - + snapshotIDs := testListSnapshots(t, env.gopts, 4) testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) } @@ -1701,19 +1667,14 @@ func createPrunableRepo(t *testing.T, env *testEnvironment) { opts := BackupOptions{} testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) - firstSnapshot := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(firstSnapshot) == 1, - "expected one snapshot, got %v", firstSnapshot) + firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 3, - "expected 3 snapshot, got %v", snapshotIDs) + testListSnapshots(t, env.gopts, 3) testRunForgetJSON(t, env.gopts) - testRunForget(t, env.gopts, 
firstSnapshot[0].String()) + testRunForget(t, env.gopts, firstSnapshot.String()) } func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { @@ -1752,22 +1713,16 @@ func TestPruneWithDamagedRepository(t *testing.T) { // create and delete snapshot to create unused blobs testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - firstSnapshot := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(firstSnapshot) == 1, - "expected one snapshot, got %v", firstSnapshot) - testRunForget(t, env.gopts, firstSnapshot[0].String()) + firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] + testRunForget(t, env.gopts, firstSnapshot.String()) oldPacks := listPacks(env.gopts, t) // create new snapshot, but lose all data testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - + testListSnapshots(t, env.gopts, 1) removePacksExcept(env.gopts, t, oldPacks, false) - rtest.Assert(t, len(snapshotIDs) == 1, - "expected one snapshot, got %v", snapshotIDs) - oldHook := env.gopts.backendTestHook env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil } defer func() { @@ -1938,9 +1893,7 @@ func TestHardLink(t *testing.T) { // first backup testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, - "expected one snapshot, got %v", snapshotIDs) + snapshotIDs := testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) @@ -2017,17 +1970,13 @@ func TestQuietBackup(t *testing.T) { env.gopts.Quiet = false testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - snapshotIDs := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 1, - "expected one snapshot, got %v", snapshotIDs) + testListSnapshots(t, 
env.gopts, 1) testRunCheck(t, env.gopts) env.gopts.Quiet = true testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - snapshotIDs = testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshotIDs) == 2, - "expected two snapshots, got %v", snapshotIDs) + testListSnapshots(t, env.gopts, 2) testRunCheck(t, env.gopts) } @@ -2242,9 +2191,7 @@ func TestBackendLoadWriteTo(t *testing.T) { // loading snapshots must still work env.gopts.NoCache = false - firstSnapshot := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(firstSnapshot) == 1, - "expected one snapshot, got %v", firstSnapshot) + testListSnapshots(t, env.gopts, 1) } func TestFindListOnce(t *testing.T) { @@ -2260,9 +2207,9 @@ func TestFindListOnce(t *testing.T) { testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - secondSnapshot := testRunList(t, "snapshots", env.gopts) + secondSnapshot := testListSnapshots(t, env.gopts, 2) testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - thirdSnapshot := restic.NewIDSet(testRunList(t, "snapshots", env.gopts)...) + thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...) 
repo, err := OpenRepository(context.TODO(), env.gopts) rtest.OK(t, err) From 06fd6b54d78870e674dceb0c9319967a7b535415 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 May 2023 23:56:58 +0200 Subject: [PATCH 03/12] test: print log output if testRunCheck fails --- .../integration_repair_snapshots_test.go | 6 ++-- cmd/restic/integration_test.go | 28 ++++++++++--------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/cmd/restic/integration_repair_snapshots_test.go b/cmd/restic/integration_repair_snapshots_test.go index 04ef6ad1d..34cd186d3 100644 --- a/cmd/restic/integration_repair_snapshots_test.go +++ b/cmd/restic/integration_repair_snapshots_test.go @@ -64,7 +64,7 @@ func TestRepairSnapshotsWithLostData(t *testing.T) { // repository must be ok after removing the broken snapshots testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String()) testListSnapshots(t, env.gopts, 2) - _, err := testRunCheckOutput(env.gopts) + _, err := testRunCheckOutput(env.gopts, false) rtest.OK(t, err) } @@ -93,7 +93,7 @@ func TestRepairSnapshotsWithLostTree(t *testing.T) { testRunRebuildIndex(t, env.gopts) testRunRepairSnapshot(t, env.gopts, true) testListSnapshots(t, env.gopts, 1) - _, err := testRunCheckOutput(env.gopts) + _, err := testRunCheckOutput(env.gopts, false) rtest.OK(t, err) } @@ -116,7 +116,7 @@ func TestRepairSnapshotsWithLostRootTree(t *testing.T) { testRunRebuildIndex(t, env.gopts) testRunRepairSnapshot(t, env.gopts, true) testListSnapshots(t, env.gopts, 0) - _, err := testRunCheckOutput(env.gopts) + _, err := testRunCheckOutput(env.gopts, false) rtest.OK(t, err) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 00b056bca..16b0bedbb 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -150,14 +150,20 @@ func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts G func testRunCheck(t testing.TB, gopts GlobalOptions) { t.Helper() - opts := 
CheckOptions{ - ReadData: true, - CheckUnused: true, + output, err := testRunCheckOutput(gopts, true) + if err != nil { + t.Error(output) + t.Fatalf("unexpected error: %+v", err) } - rtest.OK(t, runCheck(context.TODO(), opts, gopts, nil)) } -func testRunCheckOutput(gopts GlobalOptions) (string, error) { +func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { + t.Helper() + _, err := testRunCheckOutput(gopts, false) + rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository") +} + +func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { buf := bytes.NewBuffer(nil) globalOptions.stdout = buf @@ -166,18 +172,14 @@ func testRunCheckOutput(gopts GlobalOptions) (string, error) { }() opts := CheckOptions{ - ReadData: true, + ReadData: true, + CheckUnused: checkUnused, } err := runCheck(context.TODO(), opts, gopts, nil) return buf.String(), err } -func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { - _, err := testRunCheckOutput(gopts) - rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository") -} - func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) { buf := bytes.NewBuffer(nil) @@ -1488,7 +1490,7 @@ func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) { datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") rtest.SetupTarTestFixture(t, env.base, datafile) - out, err := testRunCheckOutput(env.gopts) + out, err := testRunCheckOutput(env.gopts, false) if !strings.Contains(out, "contained in several indexes") { t.Fatalf("did not find checker hint for packs in several indexes") } @@ -1505,7 +1507,7 @@ func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) { testRunRebuildIndex(t, env.gopts) env.gopts.backendTestHook = nil - out, err = testRunCheckOutput(env.gopts) + out, err = testRunCheckOutput(env.gopts, false) if len(out) != 0 
{ t.Fatalf("expected no output from the checker, got: %v", out) } From e2dba9f5c72ec0634117861caa894e7f1b57fa68 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 May 2023 00:11:45 +0200 Subject: [PATCH 04/12] test: cleanup some check calls --- cmd/restic/integration_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 16b0bedbb..ef4ccb01e 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -541,8 +541,7 @@ func TestBackupSelfHealing(t *testing.T) { testRunRebuildIndex(t, env.gopts) // now the repo is also missing the data blob in the index; check should report this - rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, - "check should have reported an error") + testRunCheckMustFail(t, env.gopts) // second backup should report an error but "heal" this situation err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) @@ -583,7 +582,7 @@ func TestBackupTreeLoadError(t *testing.T) { } testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this - rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error") + testRunCheckMustFail(t, env.gopts) // second backup should report an error but "heal" this situation err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") @@ -593,7 +592,7 @@ func TestBackupTreeLoadError(t *testing.T) { removePacksExcept(env.gopts, t, restic.NewIDSet(), true) testRunRebuildIndex(t, env.gopts) // now the repo is also missing the data blob in the index; check should report this - rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != 
nil, "check should have reported an error") + testRunCheckMustFail(t, env.gopts) // second backup should report an error but "heal" this situation err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error") From 675a49a95bd5cf9ae7ad7ad107efa856121dcf4a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 May 2023 10:37:17 +0200 Subject: [PATCH 05/12] Restructure integration tests The tests are now split into individual files for each command. The separation isn't perfect as many tests make use of multiple commands. In particular `init`, `backup`, `check` and `list` are used by a larger number of test cases. Most tests now reside in files named cmd__integration_test.go. This provides a certain indication which commands have significant test coverage. --- cmd/restic/cmd_backup_integration_test.go | 570 +++++ cmd/restic/cmd_check_integration_test.go | 42 + cmd/restic/cmd_copy_integration_test.go | 136 ++ cmd/restic/cmd_diff_integration_test.go | 202 ++ cmd/restic/cmd_find_integration_test.go | 94 + cmd/restic/cmd_forget_integration_test.go | 13 + cmd/restic/cmd_init_integration_test.go | 49 + cmd/restic/cmd_key_integration_test.go | 151 ++ cmd/restic/cmd_list_integration_test.go | 49 + cmd/restic/cmd_ls_integration_test.go | 28 + ..._test.go => cmd_mount_integration_test.go} | 0 cmd/restic/cmd_prune_integration_test.go | 229 ++ .../cmd_repair_index_integration_test.go | 143 ++ ... 
cmd_repair_snapshots_integration_test.go} | 0 cmd/restic/cmd_restore_integration_test.go | 305 +++ ...est.go => cmd_rewrite_integration_test.go} | 0 cmd/restic/cmd_snapshots_integration_test.go | 38 + cmd/restic/cmd_tag_integration_test.go | 94 + cmd/restic/integration_helpers_test.go | 123 + cmd/restic/integration_test.go | 2059 ----------------- cmd/restic/local_layout_test.go | 41 - 21 files changed, 2266 insertions(+), 2100 deletions(-) create mode 100644 cmd/restic/cmd_backup_integration_test.go create mode 100644 cmd/restic/cmd_check_integration_test.go create mode 100644 cmd/restic/cmd_copy_integration_test.go create mode 100644 cmd/restic/cmd_diff_integration_test.go create mode 100644 cmd/restic/cmd_find_integration_test.go create mode 100644 cmd/restic/cmd_forget_integration_test.go create mode 100644 cmd/restic/cmd_init_integration_test.go create mode 100644 cmd/restic/cmd_key_integration_test.go create mode 100644 cmd/restic/cmd_list_integration_test.go create mode 100644 cmd/restic/cmd_ls_integration_test.go rename cmd/restic/{integration_fuse_test.go => cmd_mount_integration_test.go} (100%) create mode 100644 cmd/restic/cmd_prune_integration_test.go create mode 100644 cmd/restic/cmd_repair_index_integration_test.go rename cmd/restic/{integration_repair_snapshots_test.go => cmd_repair_snapshots_integration_test.go} (100%) create mode 100644 cmd/restic/cmd_restore_integration_test.go rename cmd/restic/{integration_rewrite_test.go => cmd_rewrite_integration_test.go} (100%) create mode 100644 cmd/restic/cmd_snapshots_integration_test.go create mode 100644 cmd/restic/cmd_tag_integration_test.go delete mode 100644 cmd/restic/local_layout_test.go diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go new file mode 100644 index 000000000..b6491dfbf --- /dev/null +++ b/cmd/restic/cmd_backup_integration_test.go @@ -0,0 +1,570 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "runtime" 
+ "testing" + + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" + "golang.org/x/sync/errgroup" +) + +func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + var wg errgroup.Group + term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet) + wg.Go(func() error { term.Run(ctx); return nil }) + + gopts.stdout = io.Discard + t.Logf("backing up %v in %v", target, dir) + if dir != "" { + cleanup := rtest.Chdir(t, dir) + defer cleanup() + } + + opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true} + backupErr := runBackup(ctx, opts, gopts, term, target) + + cancel() + + err := wg.Wait() + if err != nil { + t.Fatal(err) + } + + return backupErr +} + +func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { + err := testRunBackupAssumeFailure(t, dir, target, opts, gopts) + rtest.Assert(t, err == nil, "Error while backing up") +} + +func TestBackup(t *testing.T) { + testBackup(t, false) +} + +func TestBackupWithFilesystemSnapshots(t *testing.T) { + if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil { + testBackup(t, true) + } +} + +func testBackup(t *testing.T, useFsSnapshot bool) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{UseFsSnapshot: useFsSnapshot} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + + testRunCheck(t, env.gopts) + stat1 := dirStats(env.repo) + + // second backup, implicit incremental + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs := testListSnapshots(t, env.gopts, 2) + + stat2 
:= dirStats(env.repo) + if stat2.size > stat1.size+stat1.size/10 { + t.Error("repository size has grown by more than 10 percent") + } + t.Logf("repository grown by %d bytes", stat2.size-stat1.size) + + testRunCheck(t, env.gopts) + // third backup, explicit incremental + opts.Parent = snapshotIDs[0].String() + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs = testListSnapshots(t, env.gopts, 3) + + stat3 := dirStats(env.repo) + if stat3.size > stat1.size+stat1.size/10 { + t.Error("repository size has grown by more than 10 percent") + } + t.Logf("repository grown by %d bytes", stat3.size-stat2.size) + + // restore all backups and compare + for i, snapshotID := range snapshotIDs { + restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) + t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) + testRunRestore(t, env.gopts, restoredir, snapshotID) + diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) + rtest.Assert(t, diff == "", "directories are not equal: %v", diff) + } + + testRunCheck(t, env.gopts) +} + +func TestBackupWithRelativePath(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // second backup, implicit incremental + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + + // that the correct parent snapshot was used + latestSn, _ := testRunSnapshots(t, env.gopts) + rtest.Assert(t, latestSn != nil, "missing latest snapshot") + rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) +} + +func TestBackupParentSelection(t *testing.T) { + env, cleanup := 
withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) + firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // second backup, sibling path + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 2) + + // third backup, incremental for the first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) + + // test that the correct parent snapshot was used + latestSn, _ := testRunSnapshots(t, env.gopts) + rtest.Assert(t, latestSn != nil, "missing latest snapshot") + rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) +} + +func TestDryRunBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + dryOpts := BackupOptions{DryRun: true} + + // dry run before first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) + snapshotIDs := testListSnapshots(t, env.gopts, 0) + packIDs := testRunList(t, "packs", env.gopts) + rtest.Assert(t, len(packIDs) == 0, + "expected no data, got %v", snapshotIDs) + indexIDs := testRunList(t, "index", env.gopts) + rtest.Assert(t, len(indexIDs) == 0, + "expected no index, got %v", snapshotIDs) + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs = testListSnapshots(t, env.gopts, 1) + packIDs = testRunList(t, "packs", env.gopts) + indexIDs = testRunList(t, "index", env.gopts) + + // dry run between backups + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) + snapshotIDsAfter := testListSnapshots(t, env.gopts, 1) + 
rtest.Equals(t, snapshotIDs, snapshotIDsAfter) + dataIDsAfter := testRunList(t, "packs", env.gopts) + rtest.Equals(t, packIDs, dataIDsAfter) + indexIDsAfter := testRunList(t, "index", env.gopts) + rtest.Equals(t, indexIDs, indexIDsAfter) + + // second backup, implicit incremental + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs = testListSnapshots(t, env.gopts, 2) + packIDs = testRunList(t, "packs", env.gopts) + indexIDs = testRunList(t, "index", env.gopts) + + // another dry run + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) + snapshotIDsAfter = testListSnapshots(t, env.gopts, 2) + rtest.Equals(t, snapshotIDs, snapshotIDsAfter) + dataIDsAfter = testRunList(t, "packs", env.gopts) + rtest.Equals(t, packIDs, dataIDsAfter) + indexIDsAfter = testRunList(t, "index", env.gopts) + rtest.Equals(t, indexIDs, indexIDsAfter) +} + +func TestBackupNonExistingFile(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + globalOptions.stderr = io.Discard + defer func() { + globalOptions.stderr = os.Stderr + }() + + p := filepath.Join(env.testdata, "0", "0", "9") + dirs := []string{ + filepath.Join(p, "0"), + filepath.Join(p, "1"), + filepath.Join(p, "nonexisting"), + filepath.Join(p, "5"), + } + + opts := BackupOptions{} + + testRunBackup(t, "", dirs, opts, env.gopts) +} + +func TestBackupSelfHealing(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + p := filepath.Join(env.testdata, "test/test") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 5)) + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // remove all data packs + removePacksExcept(env.gopts, t, restic.NewIDSet(), false) + + testRunRebuildIndex(t, env.gopts) + // now the 
repo is also missing the data blob in the index; check should report this + testRunCheckMustFail(t, env.gopts) + + // second backup should report an error but "heal" this situation + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + rtest.Assert(t, err != nil, + "backup should have reported an error") + testRunCheck(t, env.gopts) +} + +func TestBackupTreeLoadError(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + p := filepath.Join(env.testdata, "test/test") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 5)) + + opts := BackupOptions{} + // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory + testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) + + r, err := OpenRepository(context.TODO(), env.gopts) + rtest.OK(t, err) + rtest.OK(t, r.LoadIndex(context.TODO())) + treePacks := restic.NewIDSet() + r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + }) + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // delete the subdirectory pack first + for id := range treePacks { + rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})) + } + testRunRebuildIndex(t, env.gopts) + // now the repo is missing the tree blob in the index; check should report this + testRunCheckMustFail(t, env.gopts) + // second backup should report an error but "heal" this situation + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") + testRunCheck(t, env.gopts) + + // remove all tree packs + removePacksExcept(env.gopts, 
t, restic.NewIDSet(), true) + testRunRebuildIndex(t, env.gopts) + // now the repo is also missing the data blob in the index; check should report this + testRunCheckMustFail(t, env.gopts) + // second backup should report an error but "heal" this situation + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + rtest.Assert(t, err != nil, "backup should have reported an error") + testRunCheck(t, env.gopts) +} + +var backupExcludeFilenames = []string{ + "testfile1", + "foo.tar.gz", + "private/secret/passwords.txt", + "work/source/test.c", +} + +func TestBackupExclude(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + datadir := filepath.Join(env.base, "testdata") + + for _, filename := range backupExcludeFilenames { + fp := filepath.Join(datadir, filename) + rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) + + f, err := os.Create(fp) + rtest.OK(t, err) + + fmt.Fprint(f, filename) + rtest.OK(t, f.Close()) + } + + snapshots := make(map[string]struct{}) + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files := testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"), + "expected file %q in first snapshot, but it's not included", "foo.tar.gz") + + opts.Excludes = []string{"*.tar.gz"} + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files = testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), + "expected file %q not in first snapshot, but it's included", "foo.tar.gz") + + opts.Excludes = []string{"*.tar.gz", "private/secret"} + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, 
env.gopts) + _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files = testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), + "expected file %q not in first snapshot, but it's included", "foo.tar.gz") + rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"), + "expected file %q not in first snapshot, but it's included", "passwords.txt") +} + +func TestBackupErrors(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + + // Assume failure + inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0") + rtest.OK(t, os.Chmod(inaccessibleFile, 0000)) + defer func() { + rtest.OK(t, os.Chmod(inaccessibleFile, 0644)) + }() + opts := BackupOptions{} + gopts := env.gopts + gopts.stderr = io.Discard + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts) + rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.") + rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned") + testListSnapshots(t, env.gopts, 1) +} + +const ( + incrementalFirstWrite = 10 * 1042 * 1024 + incrementalSecondWrite = 1 * 1042 * 1024 + incrementalThirdWrite = 1 * 1042 * 1024 +) + +func TestIncrementalBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + datadir := filepath.Join(env.base, "testdata") + testfile := filepath.Join(datadir, "testfile") + + rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite)) + + opts := BackupOptions{} + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat1 := dirStats(env.repo) + + rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite)) + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat2 := dirStats(env.repo) + if stat2.size-stat1.size > 
incrementalFirstWrite { + t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) + } + t.Logf("repository grown by %d bytes", stat2.size-stat1.size) + + rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite)) + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat3 := dirStats(env.repo) + if stat3.size-stat2.size > incrementalFirstWrite { + t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) + } + t.Logf("repository grown by %d bytes", stat3.size-stat2.size) +} + +func TestBackupTags(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + newest, _ := testRunSnapshots(t, env.gopts) + + if newest == nil { + t.Fatal("expected a backup, got nil") + } + + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + parent := newest + + opts.Tags = restic.TagLists{[]string{"NL"}} + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + + if newest == nil { + t.Fatal("expected a backup, got nil") + } + + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", + "expected one NL tag, got %v", newest.Tags) + // Tagged backup should have untagged backup as parent. 
+ rtest.Assert(t, parent.ID.Equal(*newest.Parent), + "expected parent to be %v, got %v", parent.ID, newest.Parent) +} + +func TestQuietBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + + env.gopts.Quiet = false + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + + testRunCheck(t, env.gopts) + + env.gopts.Quiet = true + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testListSnapshots(t, env.gopts, 2) + + testRunCheck(t, env.gopts) +} + +func TestHardLink(t *testing.T) { + // this test assumes a test set with a single directory containing hard linked files + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "test.hl.tar.gz") + fd, err := os.Open(datafile) + if os.IsNotExist(err) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + linkTests := createFileSetPerHardlink(env.testdata) + + opts := BackupOptions{} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + snapshotIDs := testListSnapshots(t, env.gopts, 1) + + testRunCheck(t, env.gopts) + + // restore all backups and compare + for i, snapshotID := range snapshotIDs { + restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) + t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) + testRunRestore(t, env.gopts, restoredir, snapshotID) + diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) + rtest.Assert(t, diff == "", "directories are not equal %v", diff) + + linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata")) + rtest.Assert(t, linksEqual(linkTests, linkResults), + "links are not equal") + } + + 
testRunCheck(t, env.gopts) +} + +func linksEqual(source, dest map[uint64][]string) bool { + for _, vs := range source { + found := false + for kd, vd := range dest { + if linkEqual(vs, vd) { + delete(dest, kd) + found = true + break + } + } + if !found { + return false + } + } + + return len(dest) == 0 +} + +func linkEqual(source, dest []string) bool { + // equal if slices are equal without considering order + if source == nil && dest == nil { + return true + } + + if source == nil || dest == nil { + return false + } + + if len(source) != len(dest) { + return false + } + + for i := range source { + found := false + for j := range dest { + if source[i] == dest[j] { + found = true + break + } + } + if !found { + return false + } + } + + return true +} diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go new file mode 100644 index 000000000..05bc436c4 --- /dev/null +++ b/cmd/restic/cmd_check_integration_test.go @@ -0,0 +1,42 @@ +package main + +import ( + "bytes" + "context" + "os" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunCheck(t testing.TB, gopts GlobalOptions) { + t.Helper() + output, err := testRunCheckOutput(gopts, true) + if err != nil { + t.Error(output) + t.Fatalf("unexpected error: %+v", err) + } +} + +func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { + t.Helper() + _, err := testRunCheckOutput(gopts, false) + rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository") +} + +func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { + buf := bytes.NewBuffer(nil) + + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + opts := CheckOptions{ + ReadData: true, + CheckUnused: checkUnused, + } + + err := runCheck(context.TODO(), opts, gopts, nil) + return buf.String(), err +} diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go new file mode 
100644 index 000000000..1c8837690 --- /dev/null +++ b/cmd/restic/cmd_copy_integration_test.go @@ -0,0 +1,136 @@ +package main + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) { + gopts := srcGopts + gopts.Repo = dstGopts.Repo + gopts.password = dstGopts.password + copyOpts := CopyOptions{ + secondaryRepoOptions: secondaryRepoOptions{ + Repo: srcGopts.Repo, + password: srcGopts.password, + }, + } + + rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil)) +} + +func TestCopy(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + + testSetupBackupData(t, env) + opts := BackupOptions{} + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + testRunCheck(t, env.gopts) + + testRunInit(t, env2.gopts) + testRunCopy(t, env.gopts, env2.gopts) + + snapshotIDs := testListSnapshots(t, env.gopts, 3) + copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3) + + // Check that the copies size seems reasonable + stat := dirStats(env.repo) + stat2 := dirStats(env2.repo) + sizeDiff := int64(stat.size) - int64(stat2.size) + if sizeDiff < 0 { + sizeDiff = -sizeDiff + } + rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. 
%v", + stat.size, stat2.size) + + // Check integrity of the copy + testRunCheck(t, env2.gopts) + + // Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash) + origRestores := make(map[string]struct{}) + for i, snapshotID := range snapshotIDs { + restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) + origRestores[restoredir] = struct{}{} + testRunRestore(t, env.gopts, restoredir, snapshotID) + } + for i, snapshotID := range copiedSnapshotIDs { + restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) + testRunRestore(t, env2.gopts, restoredir, snapshotID) + foundMatch := false + for cmpdir := range origRestores { + diff := directoriesContentsDiff(restoredir, cmpdir) + if diff == "" { + delete(origRestores, cmpdir) + foundMatch = true + } + } + + rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID) + } + + rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots") +} + +func TestCopyIncremental(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + + testSetupBackupData(t, env) + opts := BackupOptions{} + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + testRunCheck(t, env.gopts) + + testRunInit(t, env2.gopts) + testRunCopy(t, env.gopts, env2.gopts) + + testListSnapshots(t, env.gopts, 2) + testListSnapshots(t, env2.gopts, 2) + + // Check that the copies size seems reasonable + testRunCheck(t, env2.gopts) + + // check that no snapshots are copied, as there are no new ones + testRunCopy(t, env.gopts, env2.gopts) + testRunCheck(t, env2.gopts) + testListSnapshots(t, env2.gopts, 2) + + // check that only new snapshots are copied + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + 
testRunCopy(t, env.gopts, env2.gopts) + testRunCheck(t, env2.gopts) + testListSnapshots(t, env.gopts, 3) + testListSnapshots(t, env2.gopts, 3) + + // also test the reverse direction + testRunCopy(t, env2.gopts, env.gopts) + testRunCheck(t, env.gopts) + testListSnapshots(t, env.gopts, 3) +} + +func TestCopyUnstableJSON(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + + // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink` + datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + testRunInit(t, env2.gopts) + testRunCopy(t, env.gopts, env2.gopts) + testRunCheck(t, env2.gopts) + testListSnapshots(t, env2.gopts, 1) +} diff --git a/cmd/restic/cmd_diff_integration_test.go b/cmd/restic/cmd_diff_integration_test.go new file mode 100644 index 000000000..ae145fedf --- /dev/null +++ b/cmd/restic/cmd_diff_integration_test.go @@ -0,0 +1,202 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) { + buf := bytes.NewBuffer(nil) + + globalOptions.stdout = buf + oldStdout := gopts.stdout + gopts.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + gopts.stdout = oldStdout + }() + + opts := DiffOptions{ + ShowMetadata: false, + } + err := runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID}) + return buf.String(), err +} + +func copyFile(dst string, src string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + dstFile, err := os.Create(dst) + if err != nil { + // ignore subsequent errors + _ = srcFile.Close() + return err + } + + _, err = io.Copy(dstFile, srcFile) + if err 
!= nil { + // ignore subsequent errors + _ = srcFile.Close() + _ = dstFile.Close() + return err + } + + err = srcFile.Close() + if err != nil { + // ignore subsequent errors + _ = dstFile.Close() + return err + } + + err = dstFile.Close() + if err != nil { + return err + } + + return nil +} + +var diffOutputRegexPatterns = []string{ + "-.+modfile", + "M.+modfile1", + "\\+.+modfile2", + "\\+.+modfile3", + "\\+.+modfile4", + "-.+submoddir", + "-.+submoddir.subsubmoddir", + "\\+.+submoddir2", + "\\+.+submoddir2.subsubmoddir", + "Files: +2 new, +1 removed, +1 changed", + "Dirs: +3 new, +2 removed", + "Data Blobs: +2 new, +1 removed", + "Added: +7[0-9]{2}\\.[0-9]{3} KiB", + "Removed: +2[0-9]{2}\\.[0-9]{3} KiB", +} + +func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) { + env, cleanup := withTestEnvironment(t) + testRunInit(t, env.gopts) + + datadir := filepath.Join(env.base, "testdata") + testdir := filepath.Join(datadir, "testdir") + subtestdir := filepath.Join(testdir, "subtestdir") + testfile := filepath.Join(testdir, "testfile") + + rtest.OK(t, os.Mkdir(testdir, 0755)) + rtest.OK(t, os.Mkdir(subtestdir, 0755)) + rtest.OK(t, appendRandomData(testfile, 256*1024)) + + moddir := filepath.Join(datadir, "moddir") + submoddir := filepath.Join(moddir, "submoddir") + subsubmoddir := filepath.Join(submoddir, "subsubmoddir") + modfile := filepath.Join(moddir, "modfile") + rtest.OK(t, os.Mkdir(moddir, 0755)) + rtest.OK(t, os.Mkdir(submoddir, 0755)) + rtest.OK(t, os.Mkdir(subsubmoddir, 0755)) + rtest.OK(t, copyFile(modfile, testfile)) + rtest.OK(t, appendRandomData(modfile+"1", 256*1024)) + + snapshots := make(map[string]struct{}) + opts := BackupOptions{} + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + + rtest.OK(t, os.Rename(modfile, modfile+"3")) + rtest.OK(t, os.Rename(submoddir, submoddir+"2")) + rtest.OK(t, appendRandomData(modfile+"1", 256*1024)) + 
rtest.OK(t, appendRandomData(modfile+"2", 256*1024)) + rtest.OK(t, os.Mkdir(modfile+"4", 0755)) + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + + return env, cleanup, firstSnapshotID, secondSnapshotID +} + +func TestDiff(t *testing.T) { + env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t) + defer cleanup() + + // quiet suppresses the diff output except for the summary + env.gopts.Quiet = false + _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID) + rtest.Assert(t, err != nil, "expected error on invalid snapshot id") + + out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) + rtest.OK(t, err) + + for _, pattern := range diffOutputRegexPatterns { + r, err := regexp.Compile(pattern) + rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern) + rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out) + } + + // check quiet output + env.gopts.Quiet = true + outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) + rtest.OK(t, err) + + rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. 
%v", len(outQuiet), len(out)) +} + +type typeSniffer struct { + MessageType string `json:"message_type"` +} + +func TestDiffJSON(t *testing.T) { + env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t) + defer cleanup() + + // quiet suppresses the diff output except for the summary + env.gopts.Quiet = false + env.gopts.JSON = true + out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) + rtest.OK(t, err) + + var stat DiffStatsContainer + var changes int + + scanner := bufio.NewScanner(strings.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + var sniffer typeSniffer + rtest.OK(t, json.Unmarshal([]byte(line), &sniffer)) + switch sniffer.MessageType { + case "change": + changes++ + case "statistics": + rtest.OK(t, json.Unmarshal([]byte(line), &stat)) + default: + t.Fatalf("unexpected message type %v", sniffer.MessageType) + } + } + rtest.Equals(t, 9, changes) + rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 && + stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 && + stat.ChangedFiles == 1, "unexpected statistics") + + // check quiet output + env.gopts.Quiet = true + outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) + rtest.OK(t, err) + + stat = DiffStatsContainer{} + rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat)) + rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 && + stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 && + stat.ChangedFiles == 1, "unexpected statistics") + rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids") +} diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go new file mode 100644 index 000000000..0ee8839e7 --- /dev/null +++ b/cmd/restic/cmd_find_integration_test.go @@ -0,0 +1,94 @@ +package main + 
+import ( + "bytes" + "context" + "encoding/json" + "os" + "strings" + "testing" + "time" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + globalOptions.JSON = wantJSON + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.JSON = false + }() + + opts := FindOptions{} + + rtest.OK(t, runFind(context.TODO(), opts, gopts, []string{pattern})) + + return buf.Bytes() +} + +func TestFind(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := testSetupBackupData(t, env) + opts := BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + + results := testRunFind(t, false, env.gopts, "unexistingfile") + rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) + + results = testRunFind(t, false, env.gopts, "testfile") + lines := strings.Split(string(results), "\n") + rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile) + + results = testRunFind(t, false, env.gopts, "testfile*") + lines = strings.Split(string(results), "\n") + rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile) +} + +type testMatch struct { + Path string `json:"path,omitempty"` + Permissions string `json:"permissions,omitempty"` + Size uint64 `json:"size,omitempty"` + Date time.Time `json:"date,omitempty"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` +} + +type testMatches struct { + Hits int `json:"hits,omitempty"` + SnapshotID string `json:"snapshot,omitempty"` + Matches []testMatch `json:"matches,omitempty"` +} + +func TestFindJSON(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := testSetupBackupData(t, env) + opts := BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, 
env.gopts) + testRunCheck(t, env.gopts) + + results := testRunFind(t, true, env.gopts, "unexistingfile") + matches := []testMatches{} + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile) + + results = testRunFind(t, true, env.gopts, "testfile") + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) + rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile) + rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile) + + results = testRunFind(t, true, env.gopts, "testfile*") + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) + rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile) + rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) +} diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go new file mode 100644 index 000000000..8908d5a5f --- /dev/null +++ b/cmd/restic/cmd_forget_integration_test.go @@ -0,0 +1,13 @@ +package main + +import ( + "context" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { + opts := ForgetOptions{} + rtest.OK(t, runForget(context.TODO(), opts, gopts, args)) +} diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go new file mode 100644 index 000000000..9b5eed6e0 --- /dev/null +++ b/cmd/restic/cmd_init_integration_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunInit(t testing.TB, opts GlobalOptions) { + 
repository.TestUseLowSecurityKDFParameters(t) + restic.TestDisableCheckPolynomial(t) + restic.TestSetLockTimeout(t, 0) + + rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil)) + t.Logf("repository initialized at %v", opts.Repo) +} + +func TestInitCopyChunkerParams(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + + testRunInit(t, env2.gopts) + + initOpts := InitOptions{ + secondaryRepoOptions: secondaryRepoOptions{ + Repo: env2.gopts.Repo, + password: env2.gopts.password, + }, + } + rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail") + + initOpts.CopyChunkerParameters = true + rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil)) + + repo, err := OpenRepository(context.TODO(), env.gopts) + rtest.OK(t, err) + + otherRepo, err := OpenRepository(context.TODO(), env2.gopts) + rtest.OK(t, err) + + rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial, + "expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial, + otherRepo.Config().ChunkerPolynomial) +} diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go new file mode 100644 index 000000000..9e327d16c --- /dev/null +++ b/cmd/restic/cmd_key_integration_test.go @@ -0,0 +1,151 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "os" + "regexp" + "testing" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string { + buf := bytes.NewBuffer(nil) + + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runKey(context.TODO(), gopts, []string{"list"})) + + scanner := bufio.NewScanner(buf) + exp := regexp.MustCompile(`^ ([a-f0-9]+) `) + 
+ IDs := []string{} + for scanner.Scan() { + if id := exp.FindStringSubmatch(scanner.Text()); id != nil { + IDs = append(IDs, id[1]) + } + } + + return IDs +} + +func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) { + testKeyNewPassword = newPassword + defer func() { + testKeyNewPassword = "" + }() + + rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"})) +} + +func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) { + testKeyNewPassword = "john's geheimnis" + defer func() { + testKeyNewPassword = "" + keyUsername = "" + keyHostname = "" + }() + + rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"})) + + t.Log("adding key for john@example.com") + rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"})) + + repo, err := OpenRepository(context.TODO(), gopts) + rtest.OK(t, err) + key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "") + rtest.OK(t, err) + + rtest.Equals(t, "john", key.Username) + rtest.Equals(t, "example.com", key.Hostname) +} + +func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) { + testKeyNewPassword = newPassword + defer func() { + testKeyNewPassword = "" + }() + + rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"})) +} + +func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) { + t.Logf("remove %d keys: %q\n", len(IDs), IDs) + for _, id := range IDs { + rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id})) + } +} + +func TestKeyAddRemove(t *testing.T) { + passwordList := []string{ + "OnnyiasyatvodsEvVodyawit", + "raicneirvOjEfEigonOmLasOd", + } + + env, cleanup := withTestEnvironment(t) + // must list keys more than once + env.gopts.backendTestHook = nil + defer cleanup() + + testRunInit(t, env.gopts) + + testRunKeyPasswd(t, "geheim2", env.gopts) + env.gopts.password = "geheim2" + t.Logf("changed password to %q", env.gopts.password) + + for _, newPassword := range passwordList { + 
testRunKeyAddNewKey(t, newPassword, env.gopts) + t.Logf("added new password %q", newPassword) + env.gopts.password = newPassword + testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts)) + } + + env.gopts.password = passwordList[len(passwordList)-1] + t.Logf("testing access with last password %q\n", env.gopts.password) + rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"})) + testRunCheck(t, env.gopts) + + testRunKeyAddNewKeyUserHost(t, env.gopts) +} + +type emptySaveBackend struct { + restic.Backend +} + +func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error { + return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil)) +} + +func TestKeyProblems(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { + return &emptySaveBackend{r}, nil + } + + testKeyNewPassword = "geheim2" + defer func() { + testKeyNewPassword = "" + }() + + err := runKey(context.TODO(), env.gopts, []string{"passwd"}) + t.Log(err) + rtest.Assert(t, err != nil, "expected passwd change to fail") + + err = runKey(context.TODO(), env.gopts, []string{"add"}) + t.Log(err) + rtest.Assert(t, err != nil, "expected key adding to fail") + + t.Logf("testing access with initial password %q\n", env.gopts.password) + rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"})) + testRunCheck(t, env.gopts) +} diff --git a/cmd/restic/cmd_list_integration_test.go b/cmd/restic/cmd_list_integration_test.go new file mode 100644 index 000000000..ce8ee4909 --- /dev/null +++ b/cmd/restic/cmd_list_integration_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "os" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs { + buf := 
bytes.NewBuffer(nil) + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runList(context.TODO(), cmdList, opts, []string{tpe})) + return parseIDsFromReader(t, buf) +} + +func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { + t.Helper() + IDs := restic.IDs{} + sc := bufio.NewScanner(rd) + + for sc.Scan() { + id, err := restic.ParseID(sc.Text()) + if err != nil { + t.Logf("parse id %v: %v", sc.Text(), err) + continue + } + + IDs = append(IDs, id) + } + + return IDs +} + +func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs { + t.Helper() + snapshotIDs := testRunList(t, "snapshots", opts) + rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshot, got %v", expected, snapshotIDs) + return snapshotIDs +} diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go new file mode 100644 index 000000000..0d2fd85db --- /dev/null +++ b/cmd/restic/cmd_ls_integration_test.go @@ -0,0 +1,28 @@ +package main + +import ( + "bytes" + "context" + "os" + "strings" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + quiet := globalOptions.Quiet + globalOptions.Quiet = true + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.Quiet = quiet + }() + + opts := LsOptions{} + + rtest.OK(t, runLs(context.TODO(), opts, gopts, []string{snapshotID})) + + return strings.Split(buf.String(), "\n") +} diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/cmd_mount_integration_test.go similarity index 100% rename from cmd/restic/integration_fuse_test.go rename to cmd/restic/cmd_mount_integration_test.go diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go new file mode 100644 index 000000000..4a3ccd232 --- /dev/null +++ 
b/cmd/restic/cmd_prune_integration_test.go @@ -0,0 +1,229 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { + oldHook := gopts.backendTestHook + gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil } + defer func() { + gopts.backendTestHook = oldHook + }() + rtest.OK(t, runPrune(context.TODO(), opts, gopts)) +} + +func TestPrune(t *testing.T) { + testPruneVariants(t, false) + testPruneVariants(t, true) +} + +func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { + suffix := "" + if unsafeNoSpaceRecovery { + suffix = "-recovery" + } + t.Run("0"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} + checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + testPrune(t, opts, checkOpts) + }) + + t.Run("50"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery} + checkOpts := CheckOptions{ReadData: true} + testPrune(t, opts, checkOpts) + }) + + t.Run("unlimited"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery} + checkOpts := CheckOptions{ReadData: true} + testPrune(t, opts, checkOpts) + }) + + t.Run("CachableOnly"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} + checkOpts := CheckOptions{ReadData: true} + testPrune(t, opts, checkOpts) + }) + t.Run("Small", func(t *testing.T) { + opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true} + checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + testPrune(t, opts, checkOpts) + }) +} + +func createPrunableRepo(t *testing.T, env *testEnvironment) { + 
testSetupBackupData(t, env) + opts := BackupOptions{} + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + testListSnapshots(t, env.gopts, 3) + + testRunForgetJSON(t, env.gopts) + testRunForget(t, env.gopts, firstSnapshot.String()) +} + +func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { + buf := bytes.NewBuffer(nil) + oldJSON := gopts.JSON + gopts.stdout = buf + gopts.JSON = true + defer func() { + gopts.stdout = os.Stdout + gopts.JSON = oldJSON + }() + + opts := ForgetOptions{ + DryRun: true, + Last: 1, + } + + rtest.OK(t, runForget(context.TODO(), opts, gopts, args)) + + var forgets []*ForgetGroup + rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets)) + + rtest.Assert(t, len(forgets) == 1, + "Expected 1 snapshot group, got %v", len(forgets)) + rtest.Assert(t, len(forgets[0].Keep) == 1, + "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep)) + rtest.Assert(t, len(forgets[0].Remove) == 2, + "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove)) +} + +func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + createPrunableRepo(t, env) + testRunPrune(t, env.gopts, pruneOpts) + rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) +} + +var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"} + +func TestPruneWithDamagedRepository(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + opts := BackupOptions{} + + // create and delete snapshot to create unused blobs 
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] + testRunForget(t, env.gopts, firstSnapshot.String()) + + oldPacks := listPacks(env.gopts, t) + + // create new snapshot, but lose all data + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + removePacksExcept(env.gopts, t, oldPacks, false) + + oldHook := env.gopts.backendTestHook + env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil } + defer func() { + env.gopts.backendTestHook = oldHook + }() + // prune should fail + rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, + "prune should have reported index not complete error") +} + +// Test repos for edge cases +func TestEdgeCaseRepos(t *testing.T) { + opts := CheckOptions{} + + // repo where index is completely missing + // => check and prune should fail + t.Run("no-index", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false) + }) + + // repo where an existing and used blob is missing from the index + // => check and prune should fail + t.Run("index-missing-blob", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false) + }) + + // repo where a blob is missing + // => check and prune should fail + t.Run("missing-data", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false) + }) + + // repo where blobs which are not needed are missing or in invalid pack files + // => check should fail and prune should repair this + t.Run("missing-unused-data", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true) + }) + + // repo where data exists that is 
not referenced + // => check and prune should fully work + t.Run("unreferenced-data", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true) + }) + + // repo where an obsolete index still exists + // => check and prune should fully work + t.Run("obsolete-index", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true) + }) + + // repo which contains mixed (data/tree) packs + // => check and prune should fully work + t.Run("mixed-packs", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true) + }) + + // repo which contains duplicate blobs + // => checking for unused data should report an error and prune resolves the + // situation + opts = CheckOptions{ + ReadData: true, + CheckUnused: true, + } + t.Run("duplicates", func(t *testing.T) { + testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true) + }) +} + +func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", tarfile) + rtest.SetupTarTestFixture(t, env.base, datafile) + + if checkOK { + testRunCheck(t, env.gopts) + } else { + rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil, + "check should have reported an error") + } + + if pruneOK { + testRunPrune(t, env.gopts, optionsPrune) + testRunCheck(t, env.gopts) + } else { + rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, + "prune should have reported an error") + } +} diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go new file mode 100644 index 000000000..a5711da84 --- /dev/null +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -0,0 +1,143 @@ +package main + +import ( + "context" + "io" + 
"os" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { + globalOptions.stdout = io.Discard + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)) +} + +func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + out, err := testRunCheckOutput(env.gopts, false) + if !strings.Contains(out, "contained in several indexes") { + t.Fatalf("did not find checker hint for packs in several indexes") + } + + if err != nil { + t.Fatalf("expected no error from checker for test repository, got %v", err) + } + + if !strings.Contains(out, "restic repair index") { + t.Fatalf("did not find hint for repair index command") + } + + env.gopts.backendTestHook = backendTestHook + testRunRebuildIndex(t, env.gopts) + + env.gopts.backendTestHook = nil + out, err = testRunCheckOutput(env.gopts, false) + if len(out) != 0 { + t.Fatalf("expected no output from the checker, got: %v", out) + } + + if err != nil { + t.Fatalf("expected no error from checker after repair index, got: %v", err) + } +} + +func TestRebuildIndex(t *testing.T) { + testRebuildIndex(t, nil) +} + +func TestRebuildIndexAlwaysFull(t *testing.T) { + indexFull := index.IndexFull + defer func() { + index.IndexFull = indexFull + }() + index.IndexFull = func(*index.Index, bool) bool { return true } + testRebuildIndex(t, nil) +} + +// indexErrorBackend modifies the first index after reading. 
+type indexErrorBackend struct { + restic.Backend + lock sync.Mutex + hasErred bool +} + +func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { + // protect hasErred + b.lock.Lock() + defer b.lock.Unlock() + if !b.hasErred && h.Type == restic.IndexFile { + b.hasErred = true + return consumer(errorReadCloser{rd}) + } + return consumer(rd) + }) +} + +type errorReadCloser struct { + io.Reader +} + +func (erd errorReadCloser) Read(p []byte) (int, error) { + n, err := erd.Reader.Read(p) + if n > 0 { + p[0] ^= 1 + } + return n, err +} + +func TestRebuildIndexDamage(t *testing.T) { + testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) { + return &indexErrorBackend{ + Backend: r, + }, nil + }) +} + +type appendOnlyBackend struct { + restic.Backend +} + +// called via repo.Backend().Remove() +func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error { + return errors.Errorf("Failed to remove %v", h) +} + +func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + globalOptions.stdout = io.Discard + defer func() { + globalOptions.stdout = os.Stdout + }() + + env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { + return &appendOnlyBackend{r}, nil + } + err := runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + if err == nil { + t.Error("expected rebuildIndex to fail") + } + t.Log(err) +} diff --git a/cmd/restic/integration_repair_snapshots_test.go b/cmd/restic/cmd_repair_snapshots_integration_test.go similarity index 100% rename from cmd/restic/integration_repair_snapshots_test.go rename to 
cmd/restic/cmd_repair_snapshots_integration_test.go diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go new file mode 100644 index 000000000..266b0c2f6 --- /dev/null +++ b/cmd/restic/cmd_restore_integration_test.go @@ -0,0 +1,305 @@ +package main + +import ( + "context" + "fmt" + "io" + mrand "math/rand" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { + testRunRestoreExcludes(t, opts, dir, snapshotID, nil) +} + +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { + opts := RestoreOptions{ + Target: dir, + Exclude: excludes, + } + + rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()})) +} + +func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { + return runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID}) +} + +func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) { + opts := RestoreOptions{ + Target: dir, + SnapshotFilter: restic.SnapshotFilter{ + Hosts: hosts, + Paths: paths, + }, + } + + rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{"latest"})) +} + +func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) { + opts := RestoreOptions{ + Target: dir, + Include: includes, + } + + rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()})) +} + +func TestRestoreFilter(t *testing.T) { + testfiles := []struct { + name string + size uint + }{ + {"testfile1.c", 100}, + {"testfile2.exe", 101}, + {"subdir1/subdir2/testfile3.docx", 102}, + 
{"subdir1/subdir2/testfile4.c", 102}, + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + for _, testFile := range testfiles { + p := filepath.Join(env.testdata, testFile.name) + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, testFile.size)) + } + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // no restore filter should restore all files + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + for _, testFile := range testfiles { + rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) + } + + for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} { + base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) + testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) + for _, testFile := range testfiles { + err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) + if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(err), + "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) + } + } + } +} + +func TestRestore(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + for i := 0; i < 10; i++ { + p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + } + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // Restore latest without any filters + restoredir := 
filepath.Join(env.base, "restore") + testRunRestoreLatest(t, env.gopts, restoredir, nil, nil) + + diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))) + rtest.Assert(t, diff == "", "directories are not equal %v", diff) +} + +func TestRestoreLatest(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + p := filepath.Join(env.testdata, "testfile.c") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 100)) + + opts := BackupOptions{} + + // chdir manually here so we can get the current directory. This is not the + // same as the temp dir returned by os.MkdirTemp() on darwin. + back := rtest.Chdir(t, filepath.Dir(env.testdata)) + defer back() + + curdir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + rtest.OK(t, os.Remove(p)) + rtest.OK(t, appendRandomData(p, 101)) + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // Restore latest without any filters + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil) + rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101))) + + // Setup test files in different directories backed up in different snapshots + p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c")) + + rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755)) + rtest.OK(t, appendRandomData(p1, 102)) + testRunBackup(t, "", []string{"p1"}, opts, env.gopts) + testRunCheck(t, env.gopts) + + p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c")) + + rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755)) + rtest.OK(t, appendRandomData(p2, 103)) + testRunBackup(t, "", []string{"p2"}, opts, env.gopts) + testRunCheck(t, env.gopts) + + p1rAbs := filepath.Join(env.base, "restore1", 
"p1/testfile.c") + p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c") + + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil) + rtest.OK(t, testFileSize(p1rAbs, int64(102))) + if _, err := os.Stat(p2rAbs); os.IsNotExist(err) { + rtest.Assert(t, os.IsNotExist(err), + "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err) + } + + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil) + rtest.OK(t, testFileSize(p2rAbs, int64(103))) + if _, err := os.Stat(p1rAbs); os.IsNotExist(err) { + rtest.Assert(t, os.IsNotExist(err), + "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err) + } +} + +func TestRestoreWithPermissionFailure(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + snapshots := testListSnapshots(t, env.gopts, 1) + + globalOptions.stderr = io.Discard + defer func() { + globalOptions.stderr = os.Stderr + }() + + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + + // make sure that all files have been restored, regardless of any + // permission errors + files := testRunLs(t, env.gopts, snapshots[0].String()) + for _, filename := range files { + fi, err := os.Lstat(filepath.Join(env.base, "restore", filename)) + rtest.OK(t, err) + + rtest.Assert(t, !isFile(fi) || fi.Size() > 0, + "file %v restored, but filesize is 0", filename) + } +} + +func setZeroModTime(filename string) error { + var utimes = []syscall.Timespec{ + syscall.NsecToTimespec(0), + syscall.NsecToTimespec(0), + } + + return syscall.UtimesNano(filename, utimes) +} + +func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + p := filepath.Join(env.testdata, 
"subdir1", "subdir2", "subdir3", "file.ext") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 200)) + rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2"))) + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // restore with filter "*.ext", this should restore "file.ext", but + // since the directories are ignored and only created because of + // "file.ext", no meta data should be restored for them. + testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"}) + + f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2") + _, err := os.Stat(f1) + rtest.OK(t, err) + + // restore with filter "*", this should restore meta data on everything. + testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"}) + + f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2") + fi, err := os.Stat(f2) + rtest.OK(t, err) + + rtest.Assert(t, fi.ModTime() == time.Unix(0, 0), + "meta data of intermediate directory hasn't been restore") +} + +func TestRestoreLocalLayout(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + var tests = []struct { + filename string + layout string + }{ + {"repo-layout-default.tar.gz", ""}, + {"repo-layout-s3legacy.tar.gz", ""}, + {"repo-layout-default.tar.gz", "default"}, + {"repo-layout-s3legacy.tar.gz", "s3legacy"}, + } + + for _, test := range tests { + datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename) + + rtest.SetupTarTestFixture(t, env.base, datafile) + + env.gopts.extended["local.layout"] = test.layout + + // check the repo + testRunCheck(t, env.gopts) + + // restore latest snapshot + target := filepath.Join(env.base, "restore") + 
testRunRestoreLatest(t, env.gopts, target, nil, nil) + + rtest.RemoveAll(t, filepath.Join(env.base, "repo")) + rtest.RemoveAll(t, target) + } +} diff --git a/cmd/restic/integration_rewrite_test.go b/cmd/restic/cmd_rewrite_integration_test.go similarity index 100% rename from cmd/restic/integration_rewrite_test.go rename to cmd/restic/cmd_rewrite_integration_test.go diff --git a/cmd/restic/cmd_snapshots_integration_test.go b/cmd/restic/cmd_snapshots_integration_test.go new file mode 100644 index 000000000..607f0bf6b --- /dev/null +++ b/cmd/restic/cmd_snapshots_integration_test.go @@ -0,0 +1,38 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "os" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + globalOptions.JSON = true + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.JSON = gopts.JSON + }() + + opts := SnapshotOptions{} + + rtest.OK(t, runSnapshots(context.TODO(), opts, globalOptions, []string{})) + + snapshots := []Snapshot{} + rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots)) + + snapmap = make(map[restic.ID]Snapshot, len(snapshots)) + for _, sn := range snapshots { + snapmap[*sn.ID] = sn + if newest == nil || sn.Time.After(newest.Time) { + newest = &sn + } + } + return +} diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go new file mode 100644 index 000000000..3b902c51e --- /dev/null +++ b/cmd/restic/cmd_tag_integration_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { + rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) +} + +func TestTag(t 
*testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ := testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a new backup, got nil") + } + + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original == nil, + "expected original ID to be nil, got %v", newest.Original) + originalID := *newest.ID + + testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a backup, got nil") + } + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", + "set failed, expected one NL tag, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a backup, got nil") + } + rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH", + "add failed, expected CH,NL tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a backup, got nil") + } + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH", + "remove failed, expected one CH tag, got %v", 
newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts) + testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a backup, got nil") + } + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + // Check special case of removing all tags. + testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + if newest == nil { + t.Fatal("expected a backup, got nil") + } + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") +} diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 655aa9335..59d9e30d3 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -2,13 +2,17 @@ package main import ( "bytes" + "context" + "crypto/rand" "fmt" + "io" "os" "path/filepath" "runtime" "testing" "github.com/restic/restic/internal/backend/retry" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -215,3 +219,122 @@ func withTestEnvironment(t testing.TB) 
(env *testEnvironment, cleanup func()) { return env, cleanup } + +func testSetupBackupData(t testing.TB, env *testEnvironment) string { + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + rtest.SetupTarTestFixture(t, env.testdata, datafile) + return datafile +} + +func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + r, err := OpenRepository(context.TODO(), gopts) + rtest.OK(t, err) + + packs := restic.NewIDSet() + + rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + packs.Insert(id) + return nil + })) + return packs +} + +func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { + r, err := OpenRepository(context.TODO(), gopts) + rtest.OK(t, err) + + for id := range remove { + rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})) + } +} + +func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { + r, err := OpenRepository(context.TODO(), gopts) + rtest.OK(t, err) + + // Get all tree packs + rtest.OK(t, r.LoadIndex(context.TODO())) + + treePacks := restic.NewIDSet() + r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + }) + + // remove all packs containing data blobs + rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + if treePacks.Has(id) != removeTreePacks || keep.Has(id) { + return nil + } + return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}) + })) +} + +func includes(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + + return false +} + +func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} { + snapshotIDs := testRunList(t, "snapshots", gopts) + + m := make(map[string]struct{}) + for _, id := range snapshotIDs 
{ + m[id.String()] = struct{}{} + } + + return m +} + +func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) { + for k := range new { + if _, ok := old[k]; !ok { + old[k] = struct{}{} + return old, k + } + } + + return old, "" +} + +func appendRandomData(filename string, bytes uint) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + _, err = f.Seek(0, 2) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes))) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + return f.Close() +} + +func testFileSize(filename string, size int64) error { + fi, err := os.Stat(filename) + if err != nil { + return err + } + + if fi.Size() != size { + return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) + } + + return nil +} diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index ef4ccb01e..8ea4d17d9 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -1,1605 +1,18 @@ package main import ( - "bufio" - "bytes" "context" - "crypto/rand" - "encoding/json" "fmt" "io" - mrand "math/rand" "os" "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - "syscall" "testing" - "time" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/filter" - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" - "github.com/restic/restic/internal/ui/termstatus" - "golang.org/x/sync/errgroup" ) -func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { - t.Helper() - IDs := restic.IDs{} - sc := bufio.NewScanner(rd) - - for sc.Scan() { - id, err := restic.ParseID(sc.Text()) - if err != nil { - t.Logf("parse 
id %v: %v", sc.Text(), err) - continue - } - - IDs = append(IDs, id) - } - - return IDs -} - -func testRunInit(t testing.TB, opts GlobalOptions) { - repository.TestUseLowSecurityKDFParameters(t) - restic.TestDisableCheckPolynomial(t) - restic.TestSetLockTimeout(t, 0) - - rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil)) - t.Logf("repository initialized at %v", opts.Repo) -} - -func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - var wg errgroup.Group - term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet) - wg.Go(func() error { term.Run(ctx); return nil }) - - gopts.stdout = io.Discard - t.Logf("backing up %v in %v", target, dir) - if dir != "" { - cleanup := rtest.Chdir(t, dir) - defer cleanup() - } - - opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true} - backupErr := runBackup(ctx, opts, gopts, term, target) - - cancel() - - err := wg.Wait() - if err != nil { - t.Fatal(err) - } - - return backupErr -} - -func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { - err := testRunBackupAssumeFailure(t, dir, target, opts, gopts) - rtest.Assert(t, err == nil, "Error while backing up") -} - -func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runList(context.TODO(), cmdList, opts, []string{tpe})) - return parseIDsFromReader(t, buf) -} - -func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs { - t.Helper() - snapshotIDs := testRunList(t, "snapshots", opts) - rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshot, got %v", expected, snapshotIDs) - return snapshotIDs -} - -func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID 
restic.ID) { - testRunRestoreExcludes(t, opts, dir, snapshotID, nil) -} - -func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) { - opts := RestoreOptions{ - Target: dir, - SnapshotFilter: restic.SnapshotFilter{ - Hosts: hosts, - Paths: paths, - }, - } - - rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{"latest"})) -} - -func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { - opts := RestoreOptions{ - Target: dir, - Exclude: excludes, - } - - rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()})) -} - -func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) { - opts := RestoreOptions{ - Target: dir, - Include: includes, - } - - rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()})) -} - -func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { - err := runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID}) - - return err -} - -func testRunCheck(t testing.TB, gopts GlobalOptions) { - t.Helper() - output, err := testRunCheckOutput(gopts, true) - if err != nil { - t.Error(output) - t.Fatalf("unexpected error: %+v", err) - } -} - -func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { - t.Helper() - _, err := testRunCheckOutput(gopts, false) - rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository") -} - -func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { - buf := bytes.NewBuffer(nil) - - globalOptions.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - opts := CheckOptions{ - ReadData: true, - CheckUnused: checkUnused, - } - - err := runCheck(context.TODO(), opts, gopts, nil) - return buf.String(), err -} - -func testRunDiffOutput(gopts GlobalOptions, 
firstSnapshotID string, secondSnapshotID string) (string, error) { - buf := bytes.NewBuffer(nil) - - globalOptions.stdout = buf - oldStdout := gopts.stdout - gopts.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - gopts.stdout = oldStdout - }() - - opts := DiffOptions{ - ShowMetadata: false, - } - err := runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID}) - return buf.String(), err -} - -func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { - globalOptions.stdout = io.Discard - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)) -} - -func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - quiet := globalOptions.Quiet - globalOptions.Quiet = true - defer func() { - globalOptions.stdout = os.Stdout - globalOptions.Quiet = quiet - }() - - opts := LsOptions{} - - rtest.OK(t, runLs(context.TODO(), opts, gopts, []string{snapshotID})) - - return strings.Split(buf.String(), "\n") -} - -func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - globalOptions.JSON = wantJSON - defer func() { - globalOptions.stdout = os.Stdout - globalOptions.JSON = false - }() - - opts := FindOptions{} - - rtest.OK(t, runFind(context.TODO(), opts, gopts, []string{pattern})) - - return buf.Bytes() -} - -func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - globalOptions.JSON = true - defer func() { - globalOptions.stdout = os.Stdout - globalOptions.JSON = gopts.JSON - }() - - opts := SnapshotOptions{} - - rtest.OK(t, runSnapshots(context.TODO(), opts, globalOptions, []string{})) - - snapshots := []Snapshot{} - rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots)) - - 
snapmap = make(map[restic.ID]Snapshot, len(snapshots)) - for _, sn := range snapshots { - snapmap[*sn.ID] = sn - if newest == nil || sn.Time.After(newest.Time) { - newest = &sn - } - } - return -} - -func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { - opts := ForgetOptions{} - rtest.OK(t, runForget(context.TODO(), opts, gopts, args)) -} - -func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { - buf := bytes.NewBuffer(nil) - oldJSON := gopts.JSON - gopts.stdout = buf - gopts.JSON = true - defer func() { - gopts.stdout = os.Stdout - gopts.JSON = oldJSON - }() - - opts := ForgetOptions{ - DryRun: true, - Last: 1, - } - - rtest.OK(t, runForget(context.TODO(), opts, gopts, args)) - - var forgets []*ForgetGroup - rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets)) - - rtest.Assert(t, len(forgets) == 1, - "Expected 1 snapshot group, got %v", len(forgets)) - rtest.Assert(t, len(forgets[0].Keep) == 1, - "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep)) - rtest.Assert(t, len(forgets[0].Remove) == 2, - "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove)) -} - -func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { - oldHook := gopts.backendTestHook - gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil } - defer func() { - gopts.backendTestHook = oldHook - }() - rtest.OK(t, runPrune(context.TODO(), opts, gopts)) -} - -func testSetupBackupData(t testing.TB, env *testEnvironment) string { - datafile := filepath.Join("testdata", "backup-data.tar.gz") - testRunInit(t, env.gopts) - rtest.SetupTarTestFixture(t, env.testdata, datafile) - return datafile -} - -func TestBackup(t *testing.T) { - testBackup(t, false) -} - -func TestBackupWithFilesystemSnapshots(t *testing.T) { - if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil { - testBackup(t, true) - } -} - -func testBackup(t *testing.T, useFsSnapshot bool) { - env, 
cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{UseFsSnapshot: useFsSnapshot} - - // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs := testListSnapshots(t, env.gopts, 1) - - testRunCheck(t, env.gopts) - stat1 := dirStats(env.repo) - - // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testListSnapshots(t, env.gopts, 2) - - stat2 := dirStats(env.repo) - if stat2.size > stat1.size+stat1.size/10 { - t.Error("repository size has grown by more than 10 percent") - } - t.Logf("repository grown by %d bytes", stat2.size-stat1.size) - - testRunCheck(t, env.gopts) - // third backup, explicit incremental - opts.Parent = snapshotIDs[0].String() - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testListSnapshots(t, env.gopts, 3) - - stat3 := dirStats(env.repo) - if stat3.size > stat1.size+stat1.size/10 { - t.Error("repository size has grown by more than 10 percent") - } - t.Logf("repository grown by %d bytes", stat3.size-stat2.size) - - // restore all backups and compare - for i, snapshotID := range snapshotIDs { - restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) - t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) - diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) - rtest.Assert(t, diff == "", "directories are not equal: %v", diff) - } - - testRunCheck(t, env.gopts) -} - -func TestBackupWithRelativePath(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{} - - // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - firstSnapshotID := testListSnapshots(t, env.gopts, 
1)[0] - - // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - - // that the correct parent snapshot was used - latestSn, _ := testRunSnapshots(t, env.gopts) - rtest.Assert(t, latestSn != nil, "missing latest snapshot") - rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) -} - -func TestBackupParentSelection(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{} - - // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) - firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0] - - // second backup, sibling path - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts) - testListSnapshots(t, env.gopts, 2) - - // third backup, incremental for the first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts) - - // test that the correct parent snapshot was used - latestSn, _ := testRunSnapshots(t, env.gopts) - rtest.Assert(t, latestSn != nil, "missing latest snapshot") - rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) -} - -func TestDryRunBackup(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{} - dryOpts := BackupOptions{DryRun: true} - - // dry run before first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDs := testListSnapshots(t, env.gopts, 0) - packIDs := testRunList(t, "packs", env.gopts) - rtest.Assert(t, len(packIDs) == 0, - "expected no data, got %v", snapshotIDs) - indexIDs := 
testRunList(t, "index", env.gopts) - rtest.Assert(t, len(indexIDs) == 0, - "expected no index, got %v", snapshotIDs) - - // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testListSnapshots(t, env.gopts, 1) - packIDs = testRunList(t, "packs", env.gopts) - indexIDs = testRunList(t, "index", env.gopts) - - // dry run between backups - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDsAfter := testListSnapshots(t, env.gopts, 1) - rtest.Equals(t, snapshotIDs, snapshotIDsAfter) - dataIDsAfter := testRunList(t, "packs", env.gopts) - rtest.Equals(t, packIDs, dataIDsAfter) - indexIDsAfter := testRunList(t, "index", env.gopts) - rtest.Equals(t, indexIDs, indexIDsAfter) - - // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshotIDs = testListSnapshots(t, env.gopts, 2) - packIDs = testRunList(t, "packs", env.gopts) - indexIDs = testRunList(t, "index", env.gopts) - - // another dry run - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts) - snapshotIDsAfter = testListSnapshots(t, env.gopts, 2) - rtest.Equals(t, snapshotIDs, snapshotIDsAfter) - dataIDsAfter = testRunList(t, "packs", env.gopts) - rtest.Equals(t, packIDs, dataIDsAfter) - indexIDsAfter = testRunList(t, "index", env.gopts) - rtest.Equals(t, indexIDs, indexIDsAfter) -} - -func TestBackupNonExistingFile(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - globalOptions.stderr = io.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - p := filepath.Join(env.testdata, "0", "0", "9") - dirs := []string{ - filepath.Join(p, "0"), - filepath.Join(p, "1"), - filepath.Join(p, "nonexisting"), - filepath.Join(p, "5"), - } - - opts := BackupOptions{} - - testRunBackup(t, "", dirs, opts, env.gopts) -} - -func removePacks(gopts 
GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) - rtest.OK(t, err) - - for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})) - } -} - -func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) - rtest.OK(t, err) - - // Get all tree packs - rtest.OK(t, r.LoadIndex(context.TODO())) - - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) - - // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { - if treePacks.Has(id) != removeTreePacks || keep.Has(id) { - return nil - } - return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}) - })) -} - -func TestBackupSelfHealing(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - p := filepath.Join(env.testdata, "test/test") - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, 5)) - - opts := BackupOptions{} - - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - // remove all data packs - removePacksExcept(env.gopts, t, restic.NewIDSet(), false) - - testRunRebuildIndex(t, env.gopts) - // now the repo is also missing the data blob in the index; check should report this - testRunCheckMustFail(t, env.gopts) - - // second backup should report an error but "heal" this situation - err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - rtest.Assert(t, err != nil, - "backup should have reported an error") - testRunCheck(t, env.gopts) -} - -func 
TestBackupTreeLoadError(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - p := filepath.Join(env.testdata, "test/test") - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, 5)) - - opts := BackupOptions{} - // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory - testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO())) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) - - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})) - } - testRunRebuildIndex(t, env.gopts) - // now the repo is missing the tree blob in the index; check should report this - testRunCheckMustFail(t, env.gopts) - // second backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") - testRunCheck(t, env.gopts) - - // remove all tree packs - removePacksExcept(env.gopts, t, restic.NewIDSet(), true) - testRunRebuildIndex(t, env.gopts) - // now the repo is also missing the data blob in the index; check should report this - testRunCheckMustFail(t, env.gopts) - // second backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - rtest.Assert(t, err != nil, "backup 
should have reported an error") - testRunCheck(t, env.gopts) -} - -func includes(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - - return false -} - -func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} { - snapshotIDs := testRunList(t, "snapshots", gopts) - - m := make(map[string]struct{}) - for _, id := range snapshotIDs { - m[id.String()] = struct{}{} - } - - return m -} - -func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) { - for k := range new { - if _, ok := old[k]; !ok { - old[k] = struct{}{} - return old, k - } - } - - return old, "" -} - -var backupExcludeFilenames = []string{ - "testfile1", - "foo.tar.gz", - "private/secret/passwords.txt", - "work/source/test.c", -} - -func TestBackupExclude(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - datadir := filepath.Join(env.base, "testdata") - - for _, filename := range backupExcludeFilenames { - fp := filepath.Join(datadir, filename) - rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) - - f, err := os.Create(fp) - rtest.OK(t, err) - - fmt.Fprint(f, filename) - rtest.OK(t, f.Close()) - } - - snapshots := make(map[string]struct{}) - - opts := BackupOptions{} - - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) - files := testRunLs(t, env.gopts, snapshotID) - rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"), - "expected file %q in first snapshot, but it's not included", "foo.tar.gz") - - opts.Excludes = []string{"*.tar.gz"} - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) - files = testRunLs(t, env.gopts, snapshotID) - rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), - "expected file %q not in 
first snapshot, but it's included", "foo.tar.gz") - - opts.Excludes = []string{"*.tar.gz", "private/secret"} - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) - files = testRunLs(t, env.gopts, snapshotID) - rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), - "expected file %q not in first snapshot, but it's included", "foo.tar.gz") - rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"), - "expected file %q not in first snapshot, but it's included", "passwords.txt") -} - -func TestBackupErrors(t *testing.T) { - if runtime.GOOS == "windows" { - return - } - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - - // Assume failure - inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0") - rtest.OK(t, os.Chmod(inaccessibleFile, 0000)) - defer func() { - rtest.OK(t, os.Chmod(inaccessibleFile, 0644)) - }() - opts := BackupOptions{} - gopts := env.gopts - gopts.stderr = io.Discard - err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts) - rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.") - rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned") - testListSnapshots(t, env.gopts, 1) -} - -const ( - incrementalFirstWrite = 10 * 1042 * 1024 - incrementalSecondWrite = 1 * 1042 * 1024 - incrementalThirdWrite = 1 * 1042 * 1024 -) - -func appendRandomData(filename string, bytes uint) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - fmt.Fprint(os.Stderr, err) - return err - } - - _, err = f.Seek(0, 2) - if err != nil { - fmt.Fprint(os.Stderr, err) - return err - } - - _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes))) - if err != nil { - fmt.Fprint(os.Stderr, err) - return err - } - - return f.Close() -} - -func TestIncrementalBackup(t *testing.T) { - env, 
cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - datadir := filepath.Join(env.base, "testdata") - testfile := filepath.Join(datadir, "testfile") - - rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite)) - - opts := BackupOptions{} - - testRunBackup(t, "", []string{datadir}, opts, env.gopts) - testRunCheck(t, env.gopts) - stat1 := dirStats(env.repo) - - rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite)) - - testRunBackup(t, "", []string{datadir}, opts, env.gopts) - testRunCheck(t, env.gopts) - stat2 := dirStats(env.repo) - if stat2.size-stat1.size > incrementalFirstWrite { - t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) - } - t.Logf("repository grown by %d bytes", stat2.size-stat1.size) - - rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite)) - - testRunBackup(t, "", []string{datadir}, opts, env.gopts) - testRunCheck(t, env.gopts) - stat3 := dirStats(env.repo) - if stat3.size-stat2.size > incrementalFirstWrite { - t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) - } - t.Logf("repository grown by %d bytes", stat3.size-stat2.size) -} - -func TestBackupTags(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{} - - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - newest, _ := testRunSnapshots(t, env.gopts) - - if newest == nil { - t.Fatal("expected a backup, got nil") - } - - rtest.Assert(t, len(newest.Tags) == 0, - "expected no tags, got %v", newest.Tags) - parent := newest - - opts.Tags = restic.TagLists{[]string{"NL"}} - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - - if newest == nil { - t.Fatal("expected a backup, got nil") - } - - rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", - "expected one NL 
tag, got %v", newest.Tags) - // Tagged backup should have untagged backup as parent. - rtest.Assert(t, parent.ID.Equal(*newest.Parent), - "expected parent to be %v, got %v", parent.ID, newest.Parent) -} - -func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) { - gopts := srcGopts - gopts.Repo = dstGopts.Repo - gopts.password = dstGopts.password - copyOpts := CopyOptions{ - secondaryRepoOptions: secondaryRepoOptions{ - Repo: srcGopts.Repo, - password: srcGopts.password, - }, - } - - rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil)) -} - -func TestCopy(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - env2, cleanup2 := withTestEnvironment(t) - defer cleanup2() - - testSetupBackupData(t, env) - opts := BackupOptions{} - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - testRunCheck(t, env.gopts) - - testRunInit(t, env2.gopts) - testRunCopy(t, env.gopts, env2.gopts) - - snapshotIDs := testListSnapshots(t, env.gopts, 3) - copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3) - - // Check that the copies size seems reasonable - stat := dirStats(env.repo) - stat2 := dirStats(env2.repo) - sizeDiff := int64(stat.size) - int64(stat2.size) - if sizeDiff < 0 { - sizeDiff = -sizeDiff - } - rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. 
%v", - stat.size, stat2.size) - - // Check integrity of the copy - testRunCheck(t, env2.gopts) - - // Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash) - origRestores := make(map[string]struct{}) - for i, snapshotID := range snapshotIDs { - restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) - origRestores[restoredir] = struct{}{} - testRunRestore(t, env.gopts, restoredir, snapshotID) - } - for i, snapshotID := range copiedSnapshotIDs { - restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) - testRunRestore(t, env2.gopts, restoredir, snapshotID) - foundMatch := false - for cmpdir := range origRestores { - diff := directoriesContentsDiff(restoredir, cmpdir) - if diff == "" { - delete(origRestores, cmpdir) - foundMatch = true - } - } - - rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID) - } - - rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots") -} - -func TestCopyIncremental(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - env2, cleanup2 := withTestEnvironment(t) - defer cleanup2() - - testSetupBackupData(t, env) - opts := BackupOptions{} - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - testRunCheck(t, env.gopts) - - testRunInit(t, env2.gopts) - testRunCopy(t, env.gopts, env2.gopts) - - testListSnapshots(t, env.gopts, 2) - testListSnapshots(t, env2.gopts, 2) - - // Check that the copies size seems reasonable - testRunCheck(t, env2.gopts) - - // check that no snapshots are copied, as there are no new ones - testRunCopy(t, env.gopts, env2.gopts) - testRunCheck(t, env2.gopts) - testListSnapshots(t, env2.gopts, 2) - - // check that only new snapshots are copied - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - 
testRunCopy(t, env.gopts, env2.gopts) - testRunCheck(t, env2.gopts) - testListSnapshots(t, env.gopts, 3) - testListSnapshots(t, env2.gopts, 3) - - // also test the reverse direction - testRunCopy(t, env2.gopts, env.gopts) - testRunCheck(t, env.gopts) - testListSnapshots(t, env.gopts, 3) -} - -func TestCopyUnstableJSON(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - env2, cleanup2 := withTestEnvironment(t) - defer cleanup2() - - // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink` - datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz") - rtest.SetupTarTestFixture(t, env.base, datafile) - - testRunInit(t, env2.gopts) - testRunCopy(t, env.gopts, env2.gopts) - testRunCheck(t, env2.gopts) - testListSnapshots(t, env2.gopts, 1) -} - -func TestInitCopyChunkerParams(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - env2, cleanup2 := withTestEnvironment(t) - defer cleanup2() - - testRunInit(t, env2.gopts) - - initOpts := InitOptions{ - secondaryRepoOptions: secondaryRepoOptions{ - Repo: env2.gopts.Repo, - password: env2.gopts.password, - }, - } - rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail") - - initOpts.CopyChunkerParameters = true - rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil)) - - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - otherRepo, err := OpenRepository(context.TODO(), env2.gopts) - rtest.OK(t, err) - - rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial, - "expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial, - otherRepo.Config().ChunkerPolynomial) -} - -func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { - rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) -} - -func TestTag(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer 
cleanup() - - testSetupBackupData(t, env) - testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ := testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a new backup, got nil") - } - - rtest.Assert(t, len(newest.Tags) == 0, - "expected no tags, got %v", newest.Tags) - rtest.Assert(t, newest.Original == nil, - "expected original ID to be nil, got %v", newest.Original) - originalID := *newest.ID - - testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a backup, got nil") - } - rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", - "set failed, expected one NL tag, got %v", newest.Tags) - rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") - rtest.Assert(t, *newest.Original == originalID, - "expected original ID to be set to the first snapshot id") - - testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a backup, got nil") - } - rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH", - "add failed, expected CH,NL tags, got %v", newest.Tags) - rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") - rtest.Assert(t, *newest.Original == originalID, - "expected original ID to be set to the first snapshot id") - - testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a backup, got nil") - } - rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH", - "remove failed, expected one CH tag, got %v", newest.Tags) - rtest.Assert(t, newest.Original != nil, "expected 
original snapshot id, got nil") - rtest.Assert(t, *newest.Original == originalID, - "expected original ID to be set to the first snapshot id") - - testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts) - testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a backup, got nil") - } - rtest.Assert(t, len(newest.Tags) == 0, - "expected no tags, got %v", newest.Tags) - rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") - rtest.Assert(t, *newest.Original == originalID, - "expected original ID to be set to the first snapshot id") - - // Check special case of removing all tags. - testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts) - testRunCheck(t, env.gopts) - newest, _ = testRunSnapshots(t, env.gopts) - if newest == nil { - t.Fatal("expected a backup, got nil") - } - rtest.Assert(t, len(newest.Tags) == 0, - "expected no tags, got %v", newest.Tags) - rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") - rtest.Assert(t, *newest.Original == originalID, - "expected original ID to be set to the first snapshot id") -} - -func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string { - buf := bytes.NewBuffer(nil) - - globalOptions.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runKey(context.TODO(), gopts, []string{"list"})) - - scanner := bufio.NewScanner(buf) - exp := regexp.MustCompile(`^ ([a-f0-9]+) `) - - IDs := []string{} - for scanner.Scan() { - if id := exp.FindStringSubmatch(scanner.Text()); id != nil { - IDs = append(IDs, id[1]) - } - } - - return IDs -} - -func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) { - testKeyNewPassword = newPassword - defer func() { - testKeyNewPassword = "" - }() - - rtest.OK(t, 
runKey(context.TODO(), gopts, []string{"add"})) -} - -func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) { - testKeyNewPassword = "john's geheimnis" - defer func() { - testKeyNewPassword = "" - keyUsername = "" - keyHostname = "" - }() - - rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"})) - - t.Log("adding key for john@example.com") - rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"})) - - repo, err := OpenRepository(context.TODO(), gopts) - rtest.OK(t, err) - key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "") - rtest.OK(t, err) - - rtest.Equals(t, "john", key.Username) - rtest.Equals(t, "example.com", key.Hostname) -} - -func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) { - testKeyNewPassword = newPassword - defer func() { - testKeyNewPassword = "" - }() - - rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"})) -} - -func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) { - t.Logf("remove %d keys: %q\n", len(IDs), IDs) - for _, id := range IDs { - rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id})) - } -} - -func TestKeyAddRemove(t *testing.T) { - passwordList := []string{ - "OnnyiasyatvodsEvVodyawit", - "raicneirvOjEfEigonOmLasOd", - } - - env, cleanup := withTestEnvironment(t) - // must list keys more than once - env.gopts.backendTestHook = nil - defer cleanup() - - testRunInit(t, env.gopts) - - testRunKeyPasswd(t, "geheim2", env.gopts) - env.gopts.password = "geheim2" - t.Logf("changed password to %q", env.gopts.password) - - for _, newPassword := range passwordList { - testRunKeyAddNewKey(t, newPassword, env.gopts) - t.Logf("added new password %q", newPassword) - env.gopts.password = newPassword - testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts)) - } - - env.gopts.password = passwordList[len(passwordList)-1] - t.Logf("testing access with last password %q\n", env.gopts.password) - 
rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"})) - testRunCheck(t, env.gopts) - - testRunKeyAddNewKeyUserHost(t, env.gopts) -} - -type emptySaveBackend struct { - restic.Backend -} - -func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error { - return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil)) -} - -func TestKeyProblems(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { - return &emptySaveBackend{r}, nil - } - - testKeyNewPassword = "geheim2" - defer func() { - testKeyNewPassword = "" - }() - - err := runKey(context.TODO(), env.gopts, []string{"passwd"}) - t.Log(err) - rtest.Assert(t, err != nil, "expected passwd change to fail") - - err = runKey(context.TODO(), env.gopts, []string{"add"}) - t.Log(err) - rtest.Assert(t, err != nil, "expected key adding to fail") - - t.Logf("testing access with initial password %q\n", env.gopts.password) - rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"})) - testRunCheck(t, env.gopts) -} - -func testFileSize(filename string, size int64) error { - fi, err := os.Stat(filename) - if err != nil { - return err - } - - if fi.Size() != size { - return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) - } - - return nil -} - -func TestRestoreFilter(t *testing.T) { - testfiles := []struct { - name string - size uint - }{ - {"testfile1.c", 100}, - {"testfile2.exe", 101}, - {"subdir1/subdir2/testfile3.docx", 102}, - {"subdir1/subdir2/testfile4.c", 102}, - } - - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - for _, testFile := range testfiles { - p := filepath.Join(env.testdata, testFile.name) - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, testFile.size)) - } - - opts := BackupOptions{} - - testRunBackup(t, 
filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - snapshotID := testListSnapshots(t, env.gopts, 1)[0] - - // no restore filter should restore all files - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) - for _, testFile := range testfiles { - rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) - } - - for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} { - base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) - testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) - for _, testFile := range testfiles { - err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) - if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { - rtest.OK(t, err) - } else { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) - } - } - } -} - -func TestRestore(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - for i := 0; i < 10; i++ { - p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) - } - - opts := BackupOptions{} - - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - // Restore latest without any filters - restoredir := filepath.Join(env.base, "restore") - testRunRestoreLatest(t, env.gopts, restoredir, nil, nil) - - diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))) - rtest.Assert(t, diff == "", "directories are not equal %v", diff) -} - -func TestRestoreLatest(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, 
env.gopts) - - p := filepath.Join(env.testdata, "testfile.c") - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, 100)) - - opts := BackupOptions{} - - // chdir manually here so we can get the current directory. This is not the - // same as the temp dir returned by os.MkdirTemp() on darwin. - back := rtest.Chdir(t, filepath.Dir(env.testdata)) - defer back() - - curdir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - rtest.OK(t, os.Remove(p)) - rtest.OK(t, appendRandomData(p, 101)) - testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - // Restore latest without any filters - testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil) - rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101))) - - // Setup test files in different directories backed up in different snapshots - p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c")) - - rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755)) - rtest.OK(t, appendRandomData(p1, 102)) - testRunBackup(t, "", []string{"p1"}, opts, env.gopts) - testRunCheck(t, env.gopts) - - p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c")) - - rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755)) - rtest.OK(t, appendRandomData(p2, 103)) - testRunBackup(t, "", []string{"p2"}, opts, env.gopts) - testRunCheck(t, env.gopts) - - p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c") - p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c") - - testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil) - rtest.OK(t, testFileSize(p1rAbs, int64(102))) - if _, err := os.Stat(p2rAbs); os.IsNotExist(err) { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore, but it exists, err %v", 
p2rAbs, err) - } - - testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil) - rtest.OK(t, testFileSize(p2rAbs, int64(103))) - if _, err := os.Stat(p1rAbs); os.IsNotExist(err) { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err) - } -} - -func TestRestoreWithPermissionFailure(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz") - rtest.SetupTarTestFixture(t, env.base, datafile) - - snapshots := testListSnapshots(t, env.gopts, 1) - - globalOptions.stderr = io.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) - - // make sure that all files have been restored, regardless of any - // permission errors - files := testRunLs(t, env.gopts, snapshots[0].String()) - for _, filename := range files { - fi, err := os.Lstat(filepath.Join(env.base, "restore", filename)) - rtest.OK(t, err) - - rtest.Assert(t, !isFile(fi) || fi.Size() > 0, - "file %v restored, but filesize is 0", filename) - } -} - -func setZeroModTime(filename string) error { - var utimes = []syscall.Timespec{ - syscall.NsecToTimespec(0), - syscall.NsecToTimespec(0), - } - - return syscall.UtimesNano(filename, utimes) -} - -func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testRunInit(t, env.gopts) - - p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext") - rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, 200)) - rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2"))) - - opts := BackupOptions{} - - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - snapshotID := 
testListSnapshots(t, env.gopts, 1)[0] - - // restore with filter "*.ext", this should restore "file.ext", but - // since the directories are ignored and only created because of - // "file.ext", no meta data should be restored for them. - testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"}) - - f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2") - _, err := os.Stat(f1) - rtest.OK(t, err) - - // restore with filter "*", this should restore meta data on everything. - testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"}) - - f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2") - fi, err := os.Stat(f2) - rtest.OK(t, err) - - rtest.Assert(t, fi.ModTime() == time.Unix(0, 0), - "meta data of intermediate directory hasn't been restore") -} - -func TestFind(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := testSetupBackupData(t, env) - opts := BackupOptions{} - - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - - results := testRunFind(t, false, env.gopts, "unexistingfile") - rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) - - results = testRunFind(t, false, env.gopts, "testfile") - lines := strings.Split(string(results), "\n") - rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile) - - results = testRunFind(t, false, env.gopts, "testfile*") - lines = strings.Split(string(results), "\n") - rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile) -} - -type testMatch struct { - Path string `json:"path,omitempty"` - Permissions string `json:"permissions,omitempty"` - Size uint64 `json:"size,omitempty"` - Date time.Time `json:"date,omitempty"` - UID uint32 `json:"uid,omitempty"` - GID uint32 `json:"gid,omitempty"` -} - -type testMatches struct { - Hits int 
`json:"hits,omitempty"` - SnapshotID string `json:"snapshot,omitempty"` - Matches []testMatch `json:"matches,omitempty"` -} - -func TestFindJSON(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := testSetupBackupData(t, env) - opts := BackupOptions{} - - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - - results := testRunFind(t, true, env.gopts, "unexistingfile") - matches := []testMatches{} - rtest.OK(t, json.Unmarshal(results, &matches)) - rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile) - - results = testRunFind(t, true, env.gopts, "testfile") - rtest.OK(t, json.Unmarshal(results, &matches)) - rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) - rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile) - rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile) - - results = testRunFind(t, true, env.gopts, "testfile*") - rtest.OK(t, json.Unmarshal(results, &matches)) - rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) - rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile) - rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) -} - -func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") - rtest.SetupTarTestFixture(t, env.base, datafile) - - out, err := testRunCheckOutput(env.gopts, false) - if !strings.Contains(out, "contained in several indexes") { - t.Fatalf("did not find checker hint for packs in several indexes") - } - - if err != nil { - t.Fatalf("expected no error from checker for test repository, got %v", err) - } - - if !strings.Contains(out, 
"restic repair index") { - t.Fatalf("did not find hint for repair index command") - } - - env.gopts.backendTestHook = backendTestHook - testRunRebuildIndex(t, env.gopts) - - env.gopts.backendTestHook = nil - out, err = testRunCheckOutput(env.gopts, false) - if len(out) != 0 { - t.Fatalf("expected no output from the checker, got: %v", out) - } - - if err != nil { - t.Fatalf("expected no error from checker after repair index, got: %v", err) - } -} - -func TestRebuildIndex(t *testing.T) { - testRebuildIndex(t, nil) -} - -func TestRebuildIndexAlwaysFull(t *testing.T) { - indexFull := index.IndexFull - defer func() { - index.IndexFull = indexFull - }() - index.IndexFull = func(*index.Index, bool) bool { return true } - testRebuildIndex(t, nil) -} - -// indexErrorBackend modifies the first index after reading. -type indexErrorBackend struct { - restic.Backend - lock sync.Mutex - hasErred bool -} - -func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { - return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { - // protect hasErred - b.lock.Lock() - defer b.lock.Unlock() - if !b.hasErred && h.Type == restic.IndexFile { - b.hasErred = true - return consumer(errorReadCloser{rd}) - } - return consumer(rd) - }) -} - -type errorReadCloser struct { - io.Reader -} - -func (erd errorReadCloser) Read(p []byte) (int, error) { - n, err := erd.Reader.Read(p) - if n > 0 { - p[0] ^= 1 - } - return n, err -} - -func TestRebuildIndexDamage(t *testing.T) { - testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) { - return &indexErrorBackend{ - Backend: r, - }, nil - }) -} - -type appendOnlyBackend struct { - restic.Backend -} - -// called via repo.Backend().Remove() -func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error { - return errors.Errorf("Failed to remove %v", h) -} - -func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { - env, cleanup := 
withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") - rtest.SetupTarTestFixture(t, env.base, datafile) - - globalOptions.stdout = io.Discard - defer func() { - globalOptions.stdout = os.Stdout - }() - - env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { - return &appendOnlyBackend{r}, nil - } - err := runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) - if err == nil { - t.Error("expected rebuildIndex to fail") - } - t.Log(err) -} - func TestCheckRestoreNoLock(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -1623,198 +36,6 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) } -func TestPrune(t *testing.T) { - testPruneVariants(t, false) - testPruneVariants(t, true) -} - -func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { - suffix := "" - if unsafeNoSpaceRecovery { - suffix = "-recovery" - } - t.Run("0"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} - testPrune(t, opts, checkOpts) - }) - - t.Run("50"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true} - testPrune(t, opts, checkOpts) - }) - - t.Run("unlimited"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true} - testPrune(t, opts, checkOpts) - }) - - t.Run("CachableOnly"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true} - testPrune(t, opts, checkOpts) - }) - t.Run("Small", func(t *testing.T) { - opts 
:= PruneOptions{MaxUnused: "unlimited", RepackSmall: true} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} - testPrune(t, opts, checkOpts) - }) -} - -func createPrunableRepo(t *testing.T, env *testEnvironment) { - testSetupBackupData(t, env) - opts := BackupOptions{} - - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) - firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] - - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - testListSnapshots(t, env.gopts, 3) - - testRunForgetJSON(t, env.gopts) - testRunForget(t, env.gopts, firstSnapshot.String()) -} - -func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - createPrunableRepo(t, env) - testRunPrune(t, env.gopts, pruneOpts) - rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) -} - -var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"} - -func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) - rtest.OK(t, err) - - packs := restic.NewIDSet() - - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { - packs.Insert(id) - return nil - })) - return packs -} - -func TestPruneWithDamagedRepository(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "backup-data.tar.gz") - testRunInit(t, env.gopts) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - opts := BackupOptions{} - - // create and delete snapshot to create unused blobs - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] - testRunForget(t, env.gopts, firstSnapshot.String()) - - oldPacks := 
listPacks(env.gopts, t) - - // create new snapshot, but lose all data - testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) - testListSnapshots(t, env.gopts, 1) - removePacksExcept(env.gopts, t, oldPacks, false) - - oldHook := env.gopts.backendTestHook - env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil } - defer func() { - env.gopts.backendTestHook = oldHook - }() - // prune should fail - rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, - "prune should have reported index not complete error") -} - -// Test repos for edge cases -func TestEdgeCaseRepos(t *testing.T) { - opts := CheckOptions{} - - // repo where index is completely missing - // => check and prune should fail - t.Run("no-index", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false) - }) - - // repo where an existing and used blob is missing from the index - // => check and prune should fail - t.Run("index-missing-blob", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false) - }) - - // repo where a blob is missing - // => check and prune should fail - t.Run("missing-data", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false) - }) - - // repo where blobs which are not needed are missing or in invalid pack files - // => check should fail and prune should repair this - t.Run("missing-unused-data", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true) - }) - - // repo where data exists that is not referenced - // => check and prune should fully work - t.Run("unreferenced-data", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true) - }) - - // repo where an 
obsolete index still exists - // => check and prune should fully work - t.Run("obsolete-index", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true) - }) - - // repo which contains mixed (data/tree) packs - // => check and prune should fully work - t.Run("mixed-packs", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true) - }) - - // repo which contains duplicate blobs - // => checking for unused data should report an error and prune resolves the - // situation - opts = CheckOptions{ - ReadData: true, - CheckUnused: true, - } - t.Run("duplicates", func(t *testing.T) { - testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true) - }) -} - -func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", tarfile) - rtest.SetupTarTestFixture(t, env.base, datafile) - - if checkOK { - testRunCheck(t, env.gopts) - } else { - rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil, - "check should have reported an error") - } - - if pruneOK { - testRunPrune(t, env.gopts, optionsPrune) - testRunCheck(t, env.gopts) - } else { - rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, - "prune should have reported an error") - } -} - // a listOnceBackend only allows listing once per filetype // listing filetypes more than once may cause problems with eventually consistent // backends (like e.g. 
Amazon S3) as the second listing may be inconsistent to what @@ -1870,286 +91,6 @@ func TestListOnce(t *testing.T) { rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts)) } -func TestHardLink(t *testing.T) { - // this test assumes a test set with a single directory containing hard linked files - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "test.hl.tar.gz") - fd, err := os.Open(datafile) - if os.IsNotExist(err) { - t.Skipf("unable to find data file %q, skipping", datafile) - return - } - rtest.OK(t, err) - rtest.OK(t, fd.Close()) - - testRunInit(t, env.gopts) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - - linkTests := createFileSetPerHardlink(env.testdata) - - opts := BackupOptions{} - - // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - snapshotIDs := testListSnapshots(t, env.gopts, 1) - - testRunCheck(t, env.gopts) - - // restore all backups and compare - for i, snapshotID := range snapshotIDs { - restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) - t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) - diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) - rtest.Assert(t, diff == "", "directories are not equal %v", diff) - - linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata")) - rtest.Assert(t, linksEqual(linkTests, linkResults), - "links are not equal") - } - - testRunCheck(t, env.gopts) -} - -func linksEqual(source, dest map[uint64][]string) bool { - for _, vs := range source { - found := false - for kd, vd := range dest { - if linkEqual(vs, vd) { - delete(dest, kd) - found = true - break - } - } - if !found { - return false - } - } - - return len(dest) == 0 -} - -func linkEqual(source, dest []string) bool { - // equal if sliced are equal 
without considering order - if source == nil && dest == nil { - return true - } - - if source == nil || dest == nil { - return false - } - - if len(source) != len(dest) { - return false - } - - for i := range source { - found := false - for j := range dest { - if source[i] == dest[j] { - found = true - break - } - } - if !found { - return false - } - } - - return true -} - -func TestQuietBackup(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - testSetupBackupData(t, env) - opts := BackupOptions{} - - env.gopts.Quiet = false - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testListSnapshots(t, env.gopts, 1) - - testRunCheck(t, env.gopts) - - env.gopts.Quiet = true - testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testListSnapshots(t, env.gopts, 2) - - testRunCheck(t, env.gopts) -} - -func copyFile(dst string, src string) error { - srcFile, err := os.Open(src) - if err != nil { - return err - } - - dstFile, err := os.Create(dst) - if err != nil { - // ignore subsequent errors - _ = srcFile.Close() - return err - } - - _, err = io.Copy(dstFile, srcFile) - if err != nil { - // ignore subsequent errors - _ = srcFile.Close() - _ = dstFile.Close() - return err - } - - err = srcFile.Close() - if err != nil { - // ignore subsequent errors - _ = dstFile.Close() - return err - } - - err = dstFile.Close() - if err != nil { - return err - } - - return nil -} - -var diffOutputRegexPatterns = []string{ - "-.+modfile", - "M.+modfile1", - "\\+.+modfile2", - "\\+.+modfile3", - "\\+.+modfile4", - "-.+submoddir", - "-.+submoddir.subsubmoddir", - "\\+.+submoddir2", - "\\+.+submoddir2.subsubmoddir", - "Files: +2 new, +1 removed, +1 changed", - "Dirs: +3 new, +2 removed", - "Data Blobs: +2 new, +1 removed", - "Added: +7[0-9]{2}\\.[0-9]{3} KiB", - "Removed: +2[0-9]{2}\\.[0-9]{3} KiB", -} - -func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) { - env, cleanup := withTestEnvironment(t) - testRunInit(t, 
env.gopts) - - datadir := filepath.Join(env.base, "testdata") - testdir := filepath.Join(datadir, "testdir") - subtestdir := filepath.Join(testdir, "subtestdir") - testfile := filepath.Join(testdir, "testfile") - - rtest.OK(t, os.Mkdir(testdir, 0755)) - rtest.OK(t, os.Mkdir(subtestdir, 0755)) - rtest.OK(t, appendRandomData(testfile, 256*1024)) - - moddir := filepath.Join(datadir, "moddir") - submoddir := filepath.Join(moddir, "submoddir") - subsubmoddir := filepath.Join(submoddir, "subsubmoddir") - modfile := filepath.Join(moddir, "modfile") - rtest.OK(t, os.Mkdir(moddir, 0755)) - rtest.OK(t, os.Mkdir(submoddir, 0755)) - rtest.OK(t, os.Mkdir(subsubmoddir, 0755)) - rtest.OK(t, copyFile(modfile, testfile)) - rtest.OK(t, appendRandomData(modfile+"1", 256*1024)) - - snapshots := make(map[string]struct{}) - opts := BackupOptions{} - testRunBackup(t, "", []string{datadir}, opts, env.gopts) - snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) - - rtest.OK(t, os.Rename(modfile, modfile+"3")) - rtest.OK(t, os.Rename(submoddir, submoddir+"2")) - rtest.OK(t, appendRandomData(modfile+"1", 256*1024)) - rtest.OK(t, appendRandomData(modfile+"2", 256*1024)) - rtest.OK(t, os.Mkdir(modfile+"4", 0755)) - - testRunBackup(t, "", []string{datadir}, opts, env.gopts) - _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) - - return env, cleanup, firstSnapshotID, secondSnapshotID -} - -func TestDiff(t *testing.T) { - env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t) - defer cleanup() - - // quiet suppresses the diff output except for the summary - env.gopts.Quiet = false - _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID) - rtest.Assert(t, err != nil, "expected error on invalid snapshot id") - - out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) - rtest.OK(t, err) - - for _, pattern := range diffOutputRegexPatterns { - r, err := regexp.Compile(pattern) - rtest.Assert(t, err == 
nil, "failed to compile regexp %v", pattern) - rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out) - } - - // check quiet output - env.gopts.Quiet = true - outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) - rtest.OK(t, err) - - rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out)) -} - -type typeSniffer struct { - MessageType string `json:"message_type"` -} - -func TestDiffJSON(t *testing.T) { - env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t) - defer cleanup() - - // quiet suppresses the diff output except for the summary - env.gopts.Quiet = false - env.gopts.JSON = true - out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) - rtest.OK(t, err) - - var stat DiffStatsContainer - var changes int - - scanner := bufio.NewScanner(strings.NewReader(out)) - for scanner.Scan() { - line := scanner.Text() - var sniffer typeSniffer - rtest.OK(t, json.Unmarshal([]byte(line), &sniffer)) - switch sniffer.MessageType { - case "change": - changes++ - case "statistics": - rtest.OK(t, json.Unmarshal([]byte(line), &stat)) - default: - t.Fatalf("unexpected message type %v", sniffer.MessageType) - } - } - rtest.Equals(t, 9, changes) - rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 && - stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 && - stat.ChangedFiles == 1, "unexpected statistics") - - // check quiet output - env.gopts.Quiet = true - outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID) - rtest.OK(t, err) - - stat = DiffStatsContainer{} - rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat)) - rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 && - stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 && - stat.ChangedFiles == 1, 
"unexpected statistics") - rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids") -} - type writeToOnly struct { rd io.Reader } diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go deleted file mode 100644 index eb614f1c3..000000000 --- a/cmd/restic/local_layout_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "path/filepath" - "testing" - - rtest "github.com/restic/restic/internal/test" -) - -func TestRestoreLocalLayout(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - var tests = []struct { - filename string - layout string - }{ - {"repo-layout-default.tar.gz", ""}, - {"repo-layout-s3legacy.tar.gz", ""}, - {"repo-layout-default.tar.gz", "default"}, - {"repo-layout-s3legacy.tar.gz", "s3legacy"}, - } - - for _, test := range tests { - datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename) - - rtest.SetupTarTestFixture(t, env.base, datafile) - - env.gopts.extended["local.layout"] = test.layout - - // check the repo - testRunCheck(t, env.gopts) - - // restore latest snapshot - target := filepath.Join(env.base, "restore") - testRunRestoreLatest(t, env.gopts, target, nil, nil) - - rtest.RemoveAll(t, filepath.Join(env.base, "repo")) - rtest.RemoveAll(t, target) - } -} From cebce52c16d55fe93a33aadf6ce625fc63037feb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 May 2023 22:06:39 +0200 Subject: [PATCH 06/12] test: add helper to capture stdout for integration tests --- cmd/restic/cmd_backup_integration_test.go | 27 ++++++++++--------- cmd/restic/cmd_check_integration_test.go | 22 +++++---------- cmd/restic/cmd_diff_integration_test.go | 21 +++++---------- cmd/restic/cmd_find_integration_test.go | 19 +++++-------- cmd/restic/cmd_key_integration_test.go | 14 +++------- cmd/restic/cmd_list_integration_test.go | 13 +++------ cmd/restic/cmd_ls_integration_test.go | 21 +++++---------- 
.../cmd_repair_index_integration_test.go | 27 +++++++++---------- cmd/restic/cmd_restore_integration_test.go | 11 ++++---- cmd/restic/cmd_snapshots_integration_test.go | 18 +++++-------- cmd/restic/global_test.go | 15 +++-------- cmd/restic/integration_helpers_test.go | 18 +++++++++++++ 12 files changed, 93 insertions(+), 133 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index b6491dfbf..769a60c03 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -205,22 +205,23 @@ func TestBackupNonExistingFile(t *testing.T) { defer cleanup() testSetupBackupData(t, env) - globalOptions.stderr = io.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - p := filepath.Join(env.testdata, "0", "0", "9") - dirs := []string{ - filepath.Join(p, "0"), - filepath.Join(p, "1"), - filepath.Join(p, "nonexisting"), - filepath.Join(p, "5"), - } + withRestoreGlobalOptions(func() error { + globalOptions.stderr = io.Discard - opts := BackupOptions{} + p := filepath.Join(env.testdata, "0", "0", "9") + dirs := []string{ + filepath.Join(p, "0"), + filepath.Join(p, "1"), + filepath.Join(p, "nonexisting"), + filepath.Join(p, "5"), + } - testRunBackup(t, "", dirs, opts, env.gopts) + opts := BackupOptions{} + + testRunBackup(t, "", dirs, opts, env.gopts) + return nil + }) } func TestBackupSelfHealing(t *testing.T) { diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go index 05bc436c4..9eb4fec62 100644 --- a/cmd/restic/cmd_check_integration_test.go +++ b/cmd/restic/cmd_check_integration_test.go @@ -1,9 +1,7 @@ package main import ( - "bytes" "context" - "os" "testing" rtest "github.com/restic/restic/internal/test" @@ -25,18 +23,12 @@ func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { } func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { - buf := bytes.NewBuffer(nil) - - globalOptions.stdout 
= buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - opts := CheckOptions{ - ReadData: true, - CheckUnused: checkUnused, - } - - err := runCheck(context.TODO(), opts, gopts, nil) + buf, err := withCaptureStdout(func() error { + opts := CheckOptions{ + ReadData: true, + CheckUnused: checkUnused, + } + return runCheck(context.TODO(), opts, gopts, nil) + }) return buf.String(), err } diff --git a/cmd/restic/cmd_diff_integration_test.go b/cmd/restic/cmd_diff_integration_test.go index ae145fedf..c46f00f4f 100644 --- a/cmd/restic/cmd_diff_integration_test.go +++ b/cmd/restic/cmd_diff_integration_test.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "bytes" "context" "encoding/json" "io" @@ -16,20 +15,14 @@ import ( ) func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) { - buf := bytes.NewBuffer(nil) + buf, err := withCaptureStdout(func() error { + gopts.stdout = globalOptions.stdout - globalOptions.stdout = buf - oldStdout := gopts.stdout - gopts.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - gopts.stdout = oldStdout - }() - - opts := DiffOptions{ - ShowMetadata: false, - } - err := runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID}) + opts := DiffOptions{ + ShowMetadata: false, + } + return runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID}) + }) return buf.String(), err } diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go index 0ee8839e7..236721154 100644 --- a/cmd/restic/cmd_find_integration_test.go +++ b/cmd/restic/cmd_find_integration_test.go @@ -1,10 +1,8 @@ package main import ( - "bytes" "context" "encoding/json" - "os" "strings" "testing" "time" @@ -13,18 +11,13 @@ import ( ) func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - globalOptions.JSON = wantJSON - defer 
func() { - globalOptions.stdout = os.Stdout - globalOptions.JSON = false - }() - - opts := FindOptions{} - - rtest.OK(t, runFind(context.TODO(), opts, gopts, []string{pattern})) + buf, err := withCaptureStdout(func() error { + globalOptions.JSON = wantJSON + opts := FindOptions{} + return runFind(context.TODO(), opts, gopts, []string{pattern}) + }) + rtest.OK(t, err) return buf.Bytes() } diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go index 9e327d16c..9ea5795ba 100644 --- a/cmd/restic/cmd_key_integration_test.go +++ b/cmd/restic/cmd_key_integration_test.go @@ -2,9 +2,7 @@ package main import ( "bufio" - "bytes" "context" - "os" "regexp" "testing" @@ -14,14 +12,10 @@ import ( ) func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string { - buf := bytes.NewBuffer(nil) - - globalOptions.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runKey(context.TODO(), gopts, []string{"list"})) + buf, err := withCaptureStdout(func() error { + return runKey(context.TODO(), gopts, []string{"list"}) + }) + rtest.OK(t, err) scanner := bufio.NewScanner(buf) exp := regexp.MustCompile(`^ ([a-f0-9]+) `) diff --git a/cmd/restic/cmd_list_integration_test.go b/cmd/restic/cmd_list_integration_test.go index ce8ee4909..4140a3ea8 100644 --- a/cmd/restic/cmd_list_integration_test.go +++ b/cmd/restic/cmd_list_integration_test.go @@ -2,10 +2,8 @@ package main import ( "bufio" - "bytes" "context" "io" - "os" "testing" "github.com/restic/restic/internal/restic" @@ -13,13 +11,10 @@ import ( ) func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runList(context.TODO(), cmdList, opts, []string{tpe})) + buf, err := withCaptureStdout(func() error { + return runList(context.TODO(), cmdList, opts, []string{tpe}) + }) + rtest.OK(t, err) return 
parseIDsFromReader(t, buf) } diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index 0d2fd85db..a93092f58 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -1,9 +1,7 @@ package main import ( - "bytes" "context" - "os" "strings" "testing" @@ -11,18 +9,11 @@ import ( ) func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - quiet := globalOptions.Quiet - globalOptions.Quiet = true - defer func() { - globalOptions.stdout = os.Stdout - globalOptions.Quiet = quiet - }() - - opts := LsOptions{} - - rtest.OK(t, runLs(context.TODO(), opts, gopts, []string{snapshotID})) - + buf, err := withCaptureStdout(func() error { + globalOptions.Quiet = true + opts := LsOptions{} + return runLs(context.TODO(), opts, gopts, []string{snapshotID}) + }) + rtest.OK(t, err) return strings.Split(buf.String(), "\n") } diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index a5711da84..f451173a3 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -3,7 +3,6 @@ package main import ( "context" "io" - "os" "path/filepath" "strings" "sync" @@ -16,12 +15,10 @@ import ( ) func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { - globalOptions.stdout = io.Discard - defer func() { - globalOptions.stdout = os.Stdout - }() - - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)) + rtest.OK(t, withRestoreGlobalOptions(func() error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts) + })) } func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) { @@ -127,15 +124,15 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { datafile := filepath.Join("..", "..", "internal", "checker", "testdata", 
"duplicate-packs-in-index-test-repo.tar.gz") rtest.SetupTarTestFixture(t, env.base, datafile) - globalOptions.stdout = io.Discard - defer func() { - globalOptions.stdout = os.Stdout - }() + err := withRestoreGlobalOptions(func() error { + globalOptions.stdout = io.Discard + + env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { + return &appendOnlyBackend{r}, nil + } + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + }) - env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { - return &appendOnlyBackend{r}, nil - } - err := runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) if err == nil { t.Error("expected rebuildIndex to fail") } diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 266b0c2f6..74fddd347 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -205,12 +205,11 @@ func TestRestoreWithPermissionFailure(t *testing.T) { snapshots := testListSnapshots(t, env.gopts, 1) - globalOptions.stderr = io.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + _ = withRestoreGlobalOptions(func() error { + globalOptions.stderr = io.Discard + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + return nil + }) // make sure that all files have been restored, regardless of any // permission errors diff --git a/cmd/restic/cmd_snapshots_integration_test.go b/cmd/restic/cmd_snapshots_integration_test.go index 607f0bf6b..ba284a9e9 100644 --- a/cmd/restic/cmd_snapshots_integration_test.go +++ b/cmd/restic/cmd_snapshots_integration_test.go @@ -1,10 +1,8 @@ package main import ( - "bytes" "context" "encoding/json" - "os" "testing" "github.com/restic/restic/internal/restic" @@ -12,17 +10,13 @@ import ( ) func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest 
*Snapshot, snapmap map[restic.ID]Snapshot) { - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - globalOptions.JSON = true - defer func() { - globalOptions.stdout = os.Stdout - globalOptions.JSON = gopts.JSON - }() + buf, err := withCaptureStdout(func() error { + globalOptions.JSON = true - opts := SnapshotOptions{} - - rtest.OK(t, runSnapshots(context.TODO(), opts, globalOptions, []string{})) + opts := SnapshotOptions{} + return runSnapshots(context.TODO(), opts, gopts, []string{}) + }) + rtest.OK(t, err) snapshots := []Snapshot{} rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots)) diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go index 7d2699a22..4f5c29e9a 100644 --- a/cmd/restic/global_test.go +++ b/cmd/restic/global_test.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "os" "path/filepath" "testing" @@ -10,22 +9,16 @@ import ( ) func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) { - gopts := globalOptions - defer func() { - globalOptions = gopts - }() - - buf := bytes.NewBuffer(nil) - globalOptions.stdout = buf - for _, p := range []func(){ func() { Println("message") }, func() { Print("message\n") }, func() { Printf("mes%s\n", "sage") }, } { - p() + buf, _ := withCaptureStdout(func() error { + p() + return nil + }) rtest.Equals(t, "message\n", buf.String()) - buf.Reset() } } diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 59d9e30d3..cdafd8c98 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -338,3 +338,21 @@ func testFileSize(filename string, size int64) error { return nil } + +func withRestoreGlobalOptions(inner func() error) error { + gopts := globalOptions + defer func() { + globalOptions = gopts + }() + return inner() +} + +func withCaptureStdout(inner func() error) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + err := withRestoreGlobalOptions(func() error { + globalOptions.stdout = buf + return inner() + }) + 
+ return buf, err +} From 4b3a0b41046e873f9f08ae6e3dbb2f0414344331 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 May 2023 22:07:47 +0200 Subject: [PATCH 07/12] read JSON/Quiet flag from the passed in globalOptions --- cmd/restic/cmd_find.go | 2 +- cmd/restic/cmd_find_integration_test.go | 2 +- cmd/restic/cmd_ls_integration_test.go | 2 +- cmd/restic/cmd_restore.go | 2 +- cmd/restic/cmd_snapshots_integration_test.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 1fa597afd..6b5e32df7 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -594,7 +594,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] f := &Finder{ repo: repo, pat: pat, - out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON}, + out: statefulOutput{ListLong: opts.ListLong, JSON: gopts.JSON}, ignoreTrees: restic.NewIDSet(), } diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go index 236721154..dd8ab87fd 100644 --- a/cmd/restic/cmd_find_integration_test.go +++ b/cmd/restic/cmd_find_integration_test.go @@ -12,7 +12,7 @@ import ( func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { buf, err := withCaptureStdout(func() error { - globalOptions.JSON = wantJSON + gopts.JSON = wantJSON opts := FindOptions{} return runFind(context.TODO(), opts, gopts, []string{pattern}) diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index a93092f58..39bf9c3b0 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -10,7 +10,7 @@ import ( func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { buf, err := withCaptureStdout(func() error { - globalOptions.Quiet = true + gopts.Quiet = true opts := LsOptions{} return runLs(context.TODO(), opts, gopts, []string{snapshotID}) }) diff --git 
a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 732a942b2..a0d4ce3e4 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -176,7 +176,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } var progress *restoreui.Progress - if !globalOptions.Quiet && !globalOptions.JSON { + if !gopts.Quiet && !gopts.JSON { progress = restoreui.NewProgress(restoreui.NewProgressPrinter(term), calculateProgressInterval(!gopts.Quiet, gopts.JSON)) } diff --git a/cmd/restic/cmd_snapshots_integration_test.go b/cmd/restic/cmd_snapshots_integration_test.go index ba284a9e9..6eaa8faa4 100644 --- a/cmd/restic/cmd_snapshots_integration_test.go +++ b/cmd/restic/cmd_snapshots_integration_test.go @@ -11,7 +11,7 @@ import ( func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) { buf, err := withCaptureStdout(func() error { - globalOptions.JSON = true + gopts.JSON = true opts := SnapshotOptions{} return runSnapshots(context.TODO(), opts, gopts, []string{}) From 7a268e4aba08d6365acb753fb46e7ba8c352930a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 May 2023 22:21:56 +0200 Subject: [PATCH 08/12] always access stdout/stderr via globalOptions --- cmd/restic/cmd_backup_integration_test.go | 5 +---- cmd/restic/cmd_cache.go | 2 +- cmd/restic/cmd_debug.go | 10 +++++----- cmd/restic/cmd_diff.go | 4 ++-- cmd/restic/cmd_diff_integration_test.go | 2 -- cmd/restic/cmd_forget.go | 4 ++-- cmd/restic/cmd_init.go | 2 +- cmd/restic/cmd_ls.go | 2 +- cmd/restic/cmd_snapshots.go | 6 +++--- 9 files changed, 16 insertions(+), 21 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 769a60c03..5dee23f17 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -24,7 +24,6 @@ func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts term := 
termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet) wg.Go(func() error { term.Run(ctx); return nil }) - gopts.stdout = io.Discard t.Logf("backing up %v in %v", target, dir) if dir != "" { cleanup := rtest.Chdir(t, dir) @@ -371,9 +370,7 @@ func TestBackupErrors(t *testing.T) { rtest.OK(t, os.Chmod(inaccessibleFile, 0644)) }() opts := BackupOptions{} - gopts := env.gopts - gopts.stderr = io.Discard - err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.") rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned") testListSnapshots(t, env.gopts, 1) diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index 334063fdc..4a10d1027 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -155,7 +155,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error { }) } - _ = tab.Write(gopts.stdout) + _ = tab.Write(globalOptions.stdout) Printf("%d cache dirs in %s\n", len(dirs), cachedir) return nil diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index deade6d22..a54200c45 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -167,20 +167,20 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error switch tpe { case "indexes": - return dumpIndexes(ctx, repo, gopts.stdout) + return dumpIndexes(ctx, repo, globalOptions.stdout) case "snapshots": - return debugPrintSnapshots(ctx, repo, gopts.stdout) + return debugPrintSnapshots(ctx, repo, globalOptions.stdout) case "packs": - return printPacks(ctx, repo, gopts.stdout) + return printPacks(ctx, repo, globalOptions.stdout) case "all": Printf("snapshots:\n") - err := debugPrintSnapshots(ctx, repo, gopts.stdout) + err := debugPrintSnapshots(ctx, repo, globalOptions.stdout) if err != nil { return err } 
Printf("\nindexes:\n") - err = dumpIndexes(ctx, repo, gopts.stdout) + err = dumpIndexes(ctx, repo, globalOptions.stdout) if err != nil { return err } diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 0861a7103..3c59b9580 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -381,7 +381,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] } if gopts.JSON { - enc := json.NewEncoder(gopts.stdout) + enc := json.NewEncoder(globalOptions.stdout) c.printChange = func(change *Change) { err := enc.Encode(change) if err != nil { @@ -415,7 +415,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added) if gopts.JSON { - err := json.NewEncoder(gopts.stdout).Encode(stats) + err := json.NewEncoder(globalOptions.stdout).Encode(stats) if err != nil { Warnf("JSON encode failed: %v\n", err) } diff --git a/cmd/restic/cmd_diff_integration_test.go b/cmd/restic/cmd_diff_integration_test.go index c46f00f4f..8782053ed 100644 --- a/cmd/restic/cmd_diff_integration_test.go +++ b/cmd/restic/cmd_diff_integration_test.go @@ -16,8 +16,6 @@ import ( func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) { buf, err := withCaptureStdout(func() error { - gopts.stdout = globalOptions.stdout - opts := DiffOptions{ ShowMetadata: false, } diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index eb8e7adde..d8e64bc6a 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -193,7 +193,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg for k, snapshotGroup := range snapshotGroups { if gopts.Verbose >= 1 && !gopts.JSON { - err = PrintSnapshotGroupHeader(gopts.stdout, k) + err = PrintSnapshotGroupHeader(globalOptions.stdout, k) if err != nil { return err } @@ -250,7 +250,7 @@ func runForget(ctx context.Context, opts 
ForgetOptions, gopts GlobalOptions, arg } if gopts.JSON && len(jsonGroups) > 0 { - err = printJSONForget(gopts.stdout, jsonGroups) + err = printJSONForget(globalOptions.stdout, jsonGroups) if err != nil { return err } diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 213c7e898..43de7ff89 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -123,7 +123,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] ID: s.Config().ID, Repository: location.StripPassword(gopts.Repo), } - return json.NewEncoder(gopts.stdout).Encode(status) + return json.NewEncoder(globalOptions.stdout).Encode(status) } return nil diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index aeaa750eb..e8c27381c 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -181,7 +181,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri ) if gopts.JSON { - enc := json.NewEncoder(gopts.stdout) + enc := json.NewEncoder(globalOptions.stdout) printSnapshot = func(sn *restic.Snapshot) { err := enc.Encode(lsSnapshot{ diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index ba3644ee7..889ac5e20 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -94,7 +94,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions } if gopts.JSON { - err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped) + err := printSnapshotGroupJSON(globalOptions.stdout, snapshotGroups, grouped) if err != nil { Warnf("error printing snapshots: %v\n", err) } @@ -103,13 +103,13 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions for k, list := range snapshotGroups { if grouped { - err := PrintSnapshotGroupHeader(gopts.stdout, k) + err := PrintSnapshotGroupHeader(globalOptions.stdout, k) if err != nil { Warnf("error printing snapshots: %v\n", err) return nil } } - PrintSnapshots(gopts.stdout, list, nil, opts.Compact) + 
PrintSnapshots(globalOptions.stdout, list, nil, opts.Compact) } return nil From 692f81ede806fb785a147b18c6928f919b77edb3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 May 2023 18:36:42 +0200 Subject: [PATCH 09/12] cleanup prune integration test --- cmd/restic/cmd_prune_integration_test.go | 26 ++++++++---------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 4a3ccd232..2cd86d895 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -1,10 +1,8 @@ package main import ( - "bytes" "context" "encoding/json" - "os" "path/filepath" "testing" @@ -77,21 +75,15 @@ func createPrunableRepo(t *testing.T, env *testEnvironment) { } func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { - buf := bytes.NewBuffer(nil) - oldJSON := gopts.JSON - gopts.stdout = buf - gopts.JSON = true - defer func() { - gopts.stdout = os.Stdout - gopts.JSON = oldJSON - }() - - opts := ForgetOptions{ - DryRun: true, - Last: 1, - } - - rtest.OK(t, runForget(context.TODO(), opts, gopts, args)) + buf, err := withCaptureStdout(func() error { + gopts.JSON = true + opts := ForgetOptions{ + DryRun: true, + Last: 1, + } + return runForget(context.TODO(), opts, gopts, args) + }) + rtest.OK(t, err) var forgets []*ForgetGroup rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets)) From b93459cbb09afea6c32ce37d7181f2c8762e1657 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 May 2023 18:38:55 +0200 Subject: [PATCH 10/12] repair snapshots: use local copy of globalOptions to open repository --- cmd/restic/cmd_repair_snapshots.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 5e9ec4130..03736795c 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -67,7 +67,7 @@ func init() { } func 
runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, globalOptions) + repo, err := OpenRepository(ctx, gopts) if err != nil { return err } From 8e913e6d3a2626231beae913358dcd6c037b36d4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 May 2023 18:40:23 +0200 Subject: [PATCH 11/12] repair index: always read Quiet flags from GlobalOptions passed as parameter --- cmd/restic/cmd_repair_index.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index bacdf83e0..b1905836a 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -142,7 +142,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti if len(packSizeFromList) > 0 { Verbosef("reading pack files\n") - bar := newProgressMax(!globalOptions.Quiet, uint64(len(packSizeFromList)), "packs") + bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs") invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) bar.Done() if err != nil { From 9747cef338a474a2048ecc5c798f3cf42a834ddd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 May 2023 19:46:49 +0200 Subject: [PATCH 12/12] fix linter warnings --- cmd/restic/cmd_backup_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5dee23f17..3af16a2be 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -205,7 +205,7 @@ func TestBackupNonExistingFile(t *testing.T) { testSetupBackupData(t, env) - withRestoreGlobalOptions(func() error { + _ = withRestoreGlobalOptions(func() error { globalOptions.stderr = io.Discard p := filepath.Join(env.testdata, "0", "0", "9")