Merge branch 'master' into add-smb-backend

Aneesh N 2024-01-09 16:06:36 -07:00 committed by GitHub
commit 83c1740425
30 changed files with 487 additions and 151 deletions

@ -68,7 +68,7 @@ jobs:
steps:
- name: Set up Go ${{ matrix.go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
@ -344,7 +344,7 @@ jobs:
steps:
- name: Set up Go ${{ env.latest_go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.latest_go }}
@ -362,7 +362,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go ${{ env.latest_go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.latest_go }}

.gitignore (1 change)
@ -1,3 +1,4 @@
/.idea
/restic
/restic.exe
/.vagrant

@ -0,0 +1,18 @@
Enhancement: Allow AWS Assume Role to be used for S3 backend
Previously, only credentials discovered via the Minio discovery methods
were used to authenticate.
However, in many circumstances the discovered credentials have lower
permissions than required and a specific role needs to be assumed. This is
now possible using the following new environment variables:
- RESTIC_AWS_ASSUME_ROLE_ARN
- RESTIC_AWS_ASSUME_ROLE_SESSION_NAME
- RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID
- RESTIC_AWS_ASSUME_ROLE_REGION (defaults to us-east-1)
- RESTIC_AWS_ASSUME_ROLE_POLICY
- RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT
https://github.com/restic/restic/issues/4472
https://github.com/restic/restic/pull/4474

@ -0,0 +1,14 @@
Bugfix: Improve errors for irregular files on Windows
Since Go 1.21, most filesystem reparse points on Windows are considered to be
irregular files. This caused restic to show an `error: invalid node type ""`
error message for those files.
We have improved the error message to include the file path for those files:
`error: nodeFromFileInfo path/to/file: unsupported file type "irregular"`.
As irregular files are not required to behave like regular files, it is not
possible to provide a generic way to back up those files.
https://github.com/restic/restic/issues/4560
https://github.com/restic/restic/pull/4620
https://forum.restic.net/t/windows-backup-error-invalid-node-type/6875
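
The underlying type check appears in the internal/restic/node.go hunk further
down in this diff. As a hedged illustration (a standalone program, not restic
code; the path to inspect is passed as a command-line argument), the following
Go sketch shows how such special files surface as os.ModeIrregular since Go 1.21:

package main

import (
	"fmt"
	"os"
)

// nodeType mirrors the kind of switch used in restic's nodeTypeFromFileInfo:
// mask the file mode with os.ModeType and map the result to a type name.
func nodeType(fi os.FileInfo) string {
	switch fi.Mode() & os.ModeType {
	case 0:
		return "file"
	case os.ModeDir:
		return "dir"
	case os.ModeSymlink:
		return "symlink"
	case os.ModeIrregular:
		return "irregular"
	default:
		return "other"
	}
}

func main() {
	fi, err := os.Lstat(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("%s: %s\n", os.Args[1], nodeType(fi))
}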

@ -0,0 +1,11 @@
Bugfix: Support backup of deduplicated files on Windows again
With the official release builds of restic 0.16.1 and 0.16.2, it was not
possible to back up files that were deduplicated by the corresponding Windows
Server feature. This also applies to restic versions built using Go
1.21.0 - 1.21.4.
We have updated the Go version used to build restic to fix this.
https://github.com/restic/restic/issues/4574
https://github.com/restic/restic/pull/4621

@ -0,0 +1,11 @@
Bugfix: Improve error handling for `rclone` backend
Since restic 0.16.0, if rclone encountered an error while listing files,
this could in rare circumstances cause restic to assume that there are no
files. Although unlikely, this situation could result in data loss if it
were to happen right when the `prune` command is listing existing snapshots.
Error handling has now been improved to detect and work around this case.
https://github.com/restic/restic/issues/4612
https://github.com/restic/restic/pull/4618

@ -0,0 +1,11 @@
Enhancement: Add bitrot detection to `diff` command
The output of the `diff` command now includes the modifier `?` to indicate
bitrot in backed-up files. It appears whenever there is a difference in
content while the metadata is exactly the same. Since files with unchanged
metadata are normally not read again when creating a backup, the detection is
only effective if the right-hand side of the diff has been created with
`backup --force`.
https://github.com/restic/restic/issues/805
https://github.com/restic/restic/pull/4526
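
A hedged usage sketch (the repository path and snapshot IDs below are
placeholders): since unchanged metadata normally causes file content to be
skipped during backup, the newer snapshot must be created with `backup --force`
so the data is actually re-read; content changes with identical metadata are
then flagged with the `?` modifier, shown concatenated with `M` as `M?`.

# Placeholders: adjust the repository and snapshot IDs to your setup.
# Force re-reading all file content so the newer snapshot reflects the on-disk data.
restic -r /srv/restic-repo backup --force /home/user/work
# Compare an older snapshot against the forced one; suspected bitrot appears as "M?".
restic -r /srv/restic-repo diff 5845b002 2ab627a6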

@ -0,0 +1,11 @@
Bugfix: Correct restore progress information if an error occurs
If an error occurred while restoring a snapshot, this could cause the restore
progress bar to show incorrect information. In addition, if a data file could
not be loaded completely, then errors would also be reported for some already
restored files.
We have improved the error reporting of the restore command to be more accurate.
https://github.com/restic/restic/pull/4624
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943

@ -0,0 +1,11 @@
Bugfix: Improve reliability of restoring large files
In some cases restic failed to restore large files that frequently contain the
same file chunk. In combination with certain backends, this could result in
network connection timeouts that caused incomplete restores.
Restic now includes special handling for such file chunks to ensure reliable
restores.
https://github.com/restic/restic/pull/4626
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943

@ -126,11 +126,12 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
if sn.Original != nil {
srcOriginal = *sn.Original
}
if originalSns, ok := dstSnapshotByOriginal[srcOriginal]; ok {
isCopy := false
for _, originalSn := range originalSns {
if similarSnapshots(originalSn, sn) {
Verboseff("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verboseff("\n%v\n", sn)
Verboseff("skipping source snapshot %s, was already copied to snapshot %s\n", sn.ID().Str(), originalSn.ID().Str())
isCopy = true
break
@ -140,7 +141,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
continue
}
}
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
Verbosef(" copy started, this may take a while...\n")
if err := copyTree(ctx, srcRepo, dstRepo, visitedTrees, *sn.Tree, gopts.Quiet); err != nil {
return err

@ -27,6 +27,10 @@ directory:
* U The metadata (access mode, timestamps, ...) for the item was updated
* M The file's content was modified
* T The type was changed, e.g. a file was made a symlink
* ? Bitrot detected: The file's content has changed but all metadata is the same
Metadata comparison will likely not work if a backup was created using the
'--ignore-inode' or '--ignore-ctime' option.
To only compare files in specific subfolders, you can use the
"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
@ -272,6 +276,16 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref
!reflect.DeepEqual(node1.Content, node2.Content) {
mod += "M"
stats.ChangedFiles++
node1NilContent := *node1
node2NilContent := *node2
node1NilContent.Content = nil
node2NilContent.Content = nil
// the bitrot detection may not work if `backup --ignore-inode` or `--ignore-ctime` were used
if node1NilContent.Equals(node2NilContent) {
// probable bitrot detected
mod += "?"
}
} else if c.opts.ShowMetadata && !node1.Equals(*node2) {
mod += "U"
}

@ -207,7 +207,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
}
} else {
printSnapshot = func(sn *restic.Snapshot) {
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
Verbosef("%v filtered by %v:\n", sn, dirs)
}
printNode = func(path string, node *restic.Node) {
Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))

@ -144,7 +144,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
changedCount := 0
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
changed, err := filterAndReplaceSnapshot(ctx, repo, sn,
func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)

@ -290,7 +290,7 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
changedCount := 0
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
changed, err := rewriteSnapshot(ctx, repo, sn, opts)
if err != nil {
return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)

@ -628,6 +628,12 @@ environment variables. The following lists these environment variables:
AWS_DEFAULT_REGION Amazon S3 default region
AWS_PROFILE Amazon credentials profile (alternative to specifying key and region)
AWS_SHARED_CREDENTIALS_FILE Location of the AWS CLI shared credentials file (default: ~/.aws/credentials)
RESTIC_AWS_ASSUME_ROLE_ARN Amazon IAM Role ARN to assume using discovered credentials
RESTIC_AWS_ASSUME_ROLE_SESSION_NAME Session Name to use with the role assumption
RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID External ID to use with the role assumption
RESTIC_AWS_ASSUME_ROLE_POLICY Inline Amazon IAM session policy
RESTIC_AWS_ASSUME_ROLE_REGION Region to use for IAM calls for the role assumption (default: us-east-1)
RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT URL of the STS endpoint (default is determined based on RESTIC_AWS_ASSUME_ROLE_REGION). You generally do not need to set this; it is intended for advanced use only.
AZURE_ACCOUNT_NAME Account name for Azure
AZURE_ACCOUNT_KEY Account key for Azure
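
As a hedged illustration of the new variables (the role ARN, session name, and
repository below are placeholders, not values taken from this change), the role
assumption is configured entirely through the environment before invoking restic:

# Placeholders: adjust the ARN, session name and repository to your setup.
export RESTIC_AWS_ASSUME_ROLE_ARN="arn:aws:iam::123456789012:role/restic-backup"
export RESTIC_AWS_ASSUME_ROLE_SESSION_NAME="restic-session"
# Optional: only needed if the STS endpoint cannot be derived from the region.
# export RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT="https://sts.eu-central-1.amazonaws.com"
restic -r s3:s3.amazonaws.com/example-bucket backup /home/user/work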

@ -94,11 +94,11 @@ example from a local to a remote repository, you can use the ``copy`` command:
repository d6504c63 opened successfully, password is correct
repository 3dd0878c opened successfully, password is correct
snapshot 410b18a2 of [/home/user/work] at 2020-06-09 23:15:57.305305 +0200 CEST)
snapshot 410b18a2 of [/home/user/work] at 2020-06-09 23:15:57.305305 +0200 CEST by user@kasimir
copy started, this may take a while...
snapshot 7a746a07 saved
snapshot 4e5d5487 of [/home/user/work] at 2020-05-01 22:44:07.012113 +0200 CEST)
snapshot 4e5d5487 of [/home/user/work] at 2020-05-01 22:44:07.012113 +0200 CEST by user@kasimir
skipping snapshot 4e5d5487, was already copied to snapshot 50eb62b7
The example command copies all snapshots from the source repository
@ -193,18 +193,18 @@ the unwanted files from affected snapshots by rewriting them using the
$ restic -r /srv/restic-repo rewrite --exclude secret-file
repository c881945a opened (repository version 2) successfully, password is correct
snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST)
snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir
excluding /home/user/work/secret-file
saved new snapshot b6aee1ff
snapshot 4fbaf325 of [/home/user/work] at 2022-05-01 11:22:26.500093107 +0200 CEST)
snapshot 4fbaf325 of [/home/user/work] at 2022-05-01 11:22:26.500093107 +0200 CEST by user@kasimir
modified 1 snapshots
$ restic -r /srv/restic-repo rewrite --exclude secret-file 6160ddb2
repository c881945a opened (repository version 2) successfully, password is correct
snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST)
snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir
excluding /home/user/work/secret-file
new snapshot saved as b6aee1ff
@ -247,7 +247,7 @@ This is possible using the ``rewrite`` command with the option ``--new-host`` fo
repository b7dbade3 opened (version 2, compression level auto)
[0:00] 100.00% 1 / 1 index files loaded
snapshot 8ed674f4 of [/path/to/abc.txt] at 2023-11-27 21:57:52.439139291 +0100 CET)
snapshot 8ed674f4 of [/path/to/abc.txt] at 2023-11-27 21:57:52.439139291 +0100 CET by user@kasimir
setting time to 1999-01-01 11:11:11 +0100 CET
setting host to newhost
saved new snapshot c05da643

@ -201,7 +201,8 @@ change
+------------------+--------------------------------------------------------------+
| ``modifier`` | Type of change, a concatenation of the following characters: |
| | "+" = added, "-" = removed, "T" = entry type changed, |
| | "M" = file content changed, "U" = metadata changed |
| | "M" = file content changed, "U" = metadata changed, |
| | "?" = bitrot detected |
+------------------+--------------------------------------------------------------+
statistics

@ -153,7 +153,7 @@ command will automatically remove the original, damaged snapshots.
$ restic repair snapshots --forget
snapshot 6979421e of [/home/user/restic/restic] at 2022-11-02 20:59:18.617503315 +0100 CET)
snapshot 6979421e of [/home/user/restic/restic] at 2022-11-02 20:59:18.617503315 +0100 CET by user@host
file "/restic/internal/fuse/snapshots_dir.go": removed missing content
file "/restic/internal/restorer/restorer_unix_test.go": removed missing content
file "/restic/internal/walker/walker.go": removed missing content

go.mod (14 changes)
@ -2,7 +2,7 @@ module github.com/restic/restic
require (
cloud.google.com/go/storage v1.34.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Backblaze/blazer v0.6.1
@ -15,8 +15,8 @@ require (
github.com/google/uuid v1.5.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hirochachacha/go-smb2 v1.1.0
github.com/klauspost/compress v1.17.2
github.com/minio/minio-go/v7 v7.0.63
github.com/klauspost/compress v1.17.4
github.com/minio/minio-go/v7 v7.0.66
github.com/minio/sha256-simd v1.0.1
github.com/ncw/swift/v2 v2.0.2
github.com/pkg/errors v0.9.1
@ -27,7 +27,7 @@ require (
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
go.uber.org/automaxprocs v1.5.3
golang.org/x/crypto v0.16.0
golang.org/x/crypto v0.17.0
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.15.0
golang.org/x/sync v0.5.0
@ -43,7 +43,7 @@ require (
cloud.google.com/go/compute v1.23.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
@ -54,11 +54,11 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect

go.sum (24 changes)
@ -9,12 +9,12 @@ cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc=
cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE=
cloud.google.com/go/storage v1.34.0 h1:9KHBBTbaHPsNxO043SFmH3pMojjZiW+BFl9H41L7xjk=
cloud.google.com/go/storage v1.34.0/go.mod h1:Eji+S0CCQebjsiXxyIvPItC3BN3zWsdJjWfHfoLblgY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 h1:TuEMD+E+1aTjjLICGQOW6vLe8UWES7kopac9mUXL56Y=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
@ -112,11 +112,11 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@ -126,8 +126,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw=
github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -188,8 +188,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=

@ -2,10 +2,12 @@ package archiver
import (
"context"
"fmt"
"os"
"path"
"runtime"
"sort"
"strings"
"time"
"github.com/restic/restic/internal/debug"
@ -168,6 +170,11 @@ func (arch *Archiver) error(item string, err error) error {
return err
}
// not all errors include the filepath, thus add it if it is missing
if !strings.Contains(err.Error(), item) {
err = fmt.Errorf("%v: %w", item, err)
}
errf := arch.Error(item, err)
if err != errf {
debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf)
@ -183,7 +190,10 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo)
}
// overwrite name to match that within the snapshot
node.Name = path.Base(snPath)
return node, errors.WithStack(err)
if err != nil {
return node, fmt.Errorf("nodeFromFileInfo %v: %w", filename, err)
}
return node, err
}
// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned.
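
The wrapping added in Archiver.error above relies on fmt.Errorf with the %w
verb so the original error remains inspectable. A minimal standalone
illustration of that pattern (not restic code; withPath is a hypothetical
helper mirroring the check above):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"strings"
)

// withPath prefixes err with the affected path unless the message already
// contains it, keeping the original error reachable via errors.Is/errors.As.
func withPath(item string, err error) error {
	if err == nil || strings.Contains(err.Error(), item) {
		return err
	}
	return fmt.Errorf("%v: %w", item, err)
}

func main() {
	err := withPath("path/to/file", fs.ErrNotExist)
	fmt.Println(err)                            // path/to/file: file does not exist
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: %w preserves the error chain
}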

@ -328,8 +328,13 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.
}
if resp.StatusCode == http.StatusNotFound {
// ignore missing directories
return nil
if !strings.HasPrefix(resp.Header.Get("Server"), "rclone/") {
// ignore missing directories, unless the server is rclone. rclone
// already ignores missing directories, but misuses "not found" to
// report certain internal errors, see
// https://github.com/rclone/rclone/pull/7550 for details.
return nil
}
}
if resp.StatusCode != http.StatusOK {

@ -51,40 +51,9 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro
minio.MaxRetry = int(cfg.MaxRetries)
}
// Chains all credential types, in the following order:
// - Static credentials provided by user
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - Minio env vars (i.e. MINIO_ACCESS_KEY)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
// - IAM profile based credentials. (performs an HTTP
// call to a pre-defined endpoint, only valid inside
// configured ec2 instances)
creds := credentials.NewChainCredentials([]credentials.Provider{
&credentials.EnvAWS{},
&credentials.Static{
Value: credentials.Value{
AccessKeyID: cfg.KeyID,
SecretAccessKey: cfg.Secret.Unwrap(),
},
},
&credentials.EnvMinio{},
&credentials.FileAWSCredentials{},
&credentials.FileMinioClient{},
&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
},
})
c, err := creds.Get()
creds, err := getCredentials(cfg)
if err != nil {
return nil, errors.Wrap(err, "creds.Get")
}
if c.SignerType == credentials.SignatureAnonymous {
debug.Log("using anonymous access for %#v", cfg.Endpoint)
return nil, errors.Wrap(err, "s3.getCredentials")
}
options := &minio.Options{
@ -125,6 +94,91 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro
return be, nil
}
// getCredentials -- runs through the various credential types and returns the first one that works.
// additionally if the user has specified a role to assume, it will do that as well.
func getCredentials(cfg Config) (*credentials.Credentials, error) {
// Chains all credential types, in the following order:
// - Static credentials provided by user
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - Minio env vars (i.e. MINIO_ACCESS_KEY)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
// - IAM profile based credentials. (performs an HTTP
// call to a pre-defined endpoint, only valid inside
// configured ec2 instances)
creds := credentials.NewChainCredentials([]credentials.Provider{
&credentials.EnvAWS{},
&credentials.Static{
Value: credentials.Value{
AccessKeyID: cfg.KeyID,
SecretAccessKey: cfg.Secret.Unwrap(),
},
},
&credentials.EnvMinio{},
&credentials.FileAWSCredentials{},
&credentials.FileMinioClient{},
&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
},
})
c, err := creds.Get()
if err != nil {
return nil, errors.Wrap(err, "creds.Get")
}
if c.SignerType == credentials.SignatureAnonymous {
debug.Log("using anonymous access for %#v", cfg.Endpoint)
}
roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN")
if roleArn != "" {
// use the region provided by the configuration by default
awsRegion := cfg.Region
// allow the region to be overridden if for some reason it is required
if os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION") != "" {
awsRegion = os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION")
}
sessionName := os.Getenv("RESTIC_AWS_ASSUME_ROLE_SESSION_NAME")
externalID := os.Getenv("RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID")
policy := os.Getenv("RESTIC_AWS_ASSUME_ROLE_POLICY")
stsEndpoint := os.Getenv("RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT")
if stsEndpoint == "" {
if awsRegion != "" {
if strings.HasPrefix(awsRegion, "cn-") {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com.cn"
} else {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com"
}
} else {
stsEndpoint = "https://sts.amazonaws.com"
}
}
opts := credentials.STSAssumeRoleOptions{
RoleARN: roleArn,
AccessKey: c.AccessKeyID,
SecretKey: c.SecretAccessKey,
SessionToken: c.SessionToken,
RoleSessionName: sessionName,
ExternalID: externalID,
Policy: policy,
Location: awsRegion,
}
creds, err = credentials.NewSTSAssumeRole(stsEndpoint, opts)
if err != nil {
return nil, errors.Wrap(err, "creds.AssumeRole")
}
}
return creds, nil
}
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
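
The STS endpoint fallback above can be summarized on its own. The following
standalone Go sketch (an illustration distilled from the logic in
getCredentials, with made-up example regions) shows which endpoint is used when
RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT is left unset:

package main

import (
	"fmt"
	"strings"
)

// defaultSTSEndpoint mirrors the fallback in getCredentials: an empty region
// selects the global endpoint, Chinese regions use the .com.cn suffix, and
// everything else uses the regional endpoint.
func defaultSTSEndpoint(region string) string {
	switch {
	case region == "":
		return "https://sts.amazonaws.com"
	case strings.HasPrefix(region, "cn-"):
		return "https://sts." + region + ".amazonaws.com.cn"
	default:
		return "https://sts." + region + ".amazonaws.com"
	}
}

func main() {
	for _, region := range []string{"", "us-east-1", "cn-north-1"} {
		fmt.Printf("%q -> %s\n", region, defaultSTSEndpoint(region))
	}
}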

@ -881,9 +881,9 @@ type BackendLoadFn func(ctx context.Context, h backend.Handle, length int, offse
const maxUnusedRange = 4 * 1024 * 1024
// StreamPack loads the listed blobs from the specified pack file. The plaintext blob is passed to
// the handleBlobFn callback or an error if decryption failed or the blob hash does not match. In
// case of download errors handleBlobFn might be called multiple times for the same blob. If the
// callback returns an error, then StreamPack will abort and not retry it.
// the handleBlobFn callback or an error if decryption failed or the blob hash does not match.
// handleBlobFn is never called multiple times for the same blob. If the callback returns an error,
// then StreamPack will abort and not retry it.
func StreamPack(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
if len(blobs) == 0 {
// nothing to do
@ -945,7 +945,9 @@ func streamPackPart(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key,
currentBlobEnd := dataStart
var buf []byte
var decode []byte
for _, entry := range blobs {
for len(blobs) > 0 {
entry := blobs[0]
skipBytes := int(entry.Offset - currentBlobEnd)
if skipBytes < 0 {
return errors.Errorf("overlapping blobs in pack %v", packID)
@ -1008,6 +1010,8 @@ func streamPackPart(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key,
cancel()
return backoff.Permanent(err)
}
// ensure that each blob is only passed once to handleBlobFn
blobs = blobs[1:]
}
return nil
})

@ -5,6 +5,7 @@ import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
@ -14,6 +15,7 @@ import (
"testing"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/google/go-cmp/cmp"
"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/backend"
@ -529,7 +531,9 @@ func testStreamPack(t *testing.T, version uint) {
packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)
loadCalls := 0
load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
shortFirstLoad := false
loadBytes := func(length int, offset int64) []byte {
data := packfile
if offset > int64(len(data)) {
@ -541,32 +545,56 @@ func testStreamPack(t *testing.T, version uint) {
if length > len(data) {
length = len(data)
}
if shortFirstLoad {
length /= 2
shortFirstLoad = false
}
return data[:length]
}
load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
data := loadBytes(length, offset)
if shortFirstLoad {
data = data[:len(data)/2]
shortFirstLoad = false
}
data = data[:length]
loadCalls++
return fn(bytes.NewReader(data))
err := fn(bytes.NewReader(data))
if err == nil {
return nil
}
var permanent *backoff.PermanentError
if errors.As(err, &permanent) {
return err
}
// retry loading once
return fn(bytes.NewReader(loadBytes(length, offset)))
}
// first, test regular usage
t.Run("regular", func(t *testing.T) {
tests := []struct {
blobs []restic.Blob
calls int
blobs []restic.Blob
calls int
shortFirstLoad bool
}{
{packfileBlobs[1:2], 1},
{packfileBlobs[2:5], 1},
{packfileBlobs[2:8], 1},
{packfileBlobs[1:2], 1, false},
{packfileBlobs[2:5], 1, false},
{packfileBlobs[2:8], 1, false},
{[]restic.Blob{
packfileBlobs[0],
packfileBlobs[4],
packfileBlobs[2],
}, 1},
}, 1, false},
{[]restic.Blob{
packfileBlobs[0],
packfileBlobs[len(packfileBlobs)-1],
}, 2},
}, 2, false},
{packfileBlobs[:], 1, true},
}
for _, test := range tests {
@ -593,6 +621,7 @@ func testStreamPack(t *testing.T, version uint) {
}
loadCalls = 0
shortFirstLoad = test.shortFirstLoad
err = repository.StreamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
if err != nil {
t.Fatal(err)
@ -605,6 +634,7 @@ func testStreamPack(t *testing.T, version uint) {
})
}
})
shortFirstLoad = false
// next, test invalid uses, which should return an error
t.Run("invalid", func(t *testing.T) {

@ -109,7 +109,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
}
func nodeTypeFromFileInfo(fi os.FileInfo) string {
switch fi.Mode() & (os.ModeType | os.ModeCharDevice) {
switch fi.Mode() & os.ModeType {
case 0:
return "file"
case os.ModeDir:
@ -124,6 +124,8 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
return "fifo"
case os.ModeSocket:
return "socket"
case os.ModeIrregular:
return "irregular"
}
return ""
@ -622,7 +624,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error {
case "fifo":
case "socket":
default:
return errors.Errorf("invalid node type %q", node.Type)
return errors.Errorf("unsupported file type %q", node.Type)
}
return node.fillExtendedAttributes(path)

@ -96,7 +96,7 @@ func ForAllSnapshots(ctx context.Context, be Lister, loader LoaderUnpacked, excl
}
func (sn Snapshot) String() string {
return fmt.Sprintf("<Snapshot %s of %v at %s by %s@%s>",
return fmt.Sprintf("snapshot %s of %v at %s by %s@%s",
sn.id.Str(), sn.Paths, sn.Time, sn.Username, sn.Hostname)
}

@ -197,19 +197,20 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
return wg.Wait()
}
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
type blobToFileOffsetsMapping map[restic.ID]struct {
files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
blob restic.Blob
}
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
// calculate blob->[]files->[]offsets mappings
blobs := make(map[restic.ID]struct {
files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
})
var blobList []restic.Blob
blobs := make(blobToFileOffsetsMapping)
for file := range pack.files {
addBlob := func(blob restic.Blob, fileOffset int64) {
blobInfo, ok := blobs[blob.ID]
if !ok {
blobInfo.files = make(map[*fileInfo][]int64)
blobList = append(blobList, blob)
blobInfo.blob = blob
blobs[blob.ID] = blobInfo
}
blobInfo.files[file] = append(blobInfo.files[file], fileOffset)
@ -239,65 +240,120 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
}
}
sanitizeError := func(file *fileInfo, err error) error {
if err != nil {
err = r.Error(file.location, err)
// track already processed blobs for precise error reporting
processedBlobs := restic.NewBlobSet()
for _, entry := range blobs {
occurrences := 0
for _, offsets := range entry.files {
occurrences += len(offsets)
}
// With a maximum blob size of 8MB, the normal blob streaming has to write
// at most 800MB for a single blob. This should be short enough to avoid
// network connection timeouts. Based on a quick test, a limit of 100 only
// selects a very small number of blobs (the number of references per blob
// - aka. `count` - seems to follow an exponential distribution)
if occurrences > 100 {
// process frequently referenced blobs first as these can take a long time to write
// which can cause backend connections to time out
delete(blobs, entry.blob.ID)
partialBlobs := blobToFileOffsetsMapping{entry.blob.ID: entry}
err := r.downloadBlobs(ctx, pack.id, partialBlobs, processedBlobs)
if err := r.reportError(blobs, processedBlobs, err); err != nil {
return err
}
}
return err
}
err := repository.StreamPack(ctx, r.packLoader, r.key, pack.id, blobList, func(h restic.BlobHandle, blobData []byte, err error) error {
blob := blobs[h.ID]
if err != nil {
for file := range blob.files {
if errFile := sanitizeError(file, err); errFile != nil {
return errFile
if len(blobs) == 0 {
return nil
}
err := r.downloadBlobs(ctx, pack.id, blobs, processedBlobs)
return r.reportError(blobs, processedBlobs, err)
}
func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error {
if err != nil {
err = r.Error(file.location, err)
}
return err
}
func (r *fileRestorer) reportError(blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet, err error) error {
if err == nil {
return nil
}
// only report error for not yet processed blobs
affectedFiles := make(map[*fileInfo]struct{})
for _, entry := range blobs {
if processedBlobs.Has(entry.blob.BlobHandle) {
continue
}
for file := range entry.files {
affectedFiles[file] = struct{}{}
}
}
for file := range affectedFiles {
if errFile := r.sanitizeError(file, err); errFile != nil {
return errFile
}
}
return nil
}
func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet) error {
blobList := make([]restic.Blob, 0, len(blobs))
for _, entry := range blobs {
blobList = append(blobList, entry.blob)
}
return repository.StreamPack(ctx, r.packLoader, r.key, packID, blobList,
func(h restic.BlobHandle, blobData []byte, err error) error {
processedBlobs.Insert(h)
blob := blobs[h.ID]
if err != nil {
for file := range blob.files {
if errFile := r.sanitizeError(file, err); errFile != nil {
return errFile
}
}
return nil
}
for file, offsets := range blob.files {
for _, offset := range offsets {
writeToFile := func() error {
// this looks overly complicated and needs explanation
// two competing requirements:
// - must create the file once and only once
// - should allow concurrent writes to the file
// so write the first blob while holding file lock
// write other blobs after releasing the lock
createSize := int64(-1)
file.lock.Lock()
if file.inProgress {
file.lock.Unlock()
} else {
defer file.lock.Unlock()
file.inProgress = true
createSize = file.size
}
writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
if r.progress != nil {
r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size))
}
return writeErr
}
err := r.sanitizeError(file, writeToFile())
if err != nil {
return err
}
}
}
return nil
}
for file, offsets := range blob.files {
for _, offset := range offsets {
writeToFile := func() error {
// this looks overly complicated and needs explanation
// two competing requirements:
// - must create the file once and only once
// - should allow concurrent writes to the file
// so write the first blob while holding file lock
// write other blobs after releasing the lock
createSize := int64(-1)
file.lock.Lock()
if file.inProgress {
file.lock.Unlock()
} else {
defer file.lock.Unlock()
file.inProgress = true
createSize = file.size
}
writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
if r.progress != nil {
r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size))
}
return writeErr
}
err := sanitizeError(file, writeToFile())
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
for file := range pack.files {
if errFile := sanitizeError(file, err); errFile != nil {
return errFile
}
}
}
return nil
})
}

@ -248,6 +248,27 @@ func TestFileRestorerPackSkip(t *testing.T) {
}
}
func TestFileRestorerFrequentBlob(t *testing.T) {
tempdir := rtest.TempDir(t)
for _, sparse := range []bool{false, true} {
blobs := []TestBlob{
{"data1-1", "pack1-1"},
}
for i := 0; i < 10000; i++ {
blobs = append(blobs, TestBlob{"a", "pack1-1"})
}
blobs = append(blobs, TestBlob{"end", "pack1-1"})
restoreAndVerify(t, tempdir, []TestFile{
{
name: "file1",
blobs: blobs,
},
}, nil, sparse)
}
}
func TestErrorRestoreFiles(t *testing.T) {
tempdir := rtest.TempDir(t)
content := []TestFile{
@ -317,3 +338,47 @@ func testPartialDownloadError(t *testing.T, part int) {
rtest.OK(t, err)
verifyRestore(t, r, repo)
}
func TestFatalDownloadError(t *testing.T) {
tempdir := rtest.TempDir(t)
content := []TestFile{
{
name: "file1",
blobs: []TestBlob{
{"data1-1", "pack1"},
{"data1-2", "pack1"},
},
},
{
name: "file2",
blobs: []TestBlob{
{"data2-1", "pack1"},
{"data2-2", "pack1"},
{"data2-3", "pack1"},
},
}}
repo := newTestRepo(content)
loader := repo.loader
repo.loader = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
// only return half the data to break file2
return loader(ctx, h, length/2, offset, fn)
}
r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false, nil)
r.files = repo.files
var errors []string
r.Error = func(s string, e error) error {
// ignore errors as in the `restore` command
errors = append(errors, s)
return nil
}
err := r.restoreFiles(context.TODO())
rtest.OK(t, err)
rtest.Assert(t, len(errors) == 1, "unexpected number of restore errors, expected: 1, got: %v", len(errors))
rtest.Assert(t, errors[0] == "file2", "expected error for file2, got: %v", errors[0])
}

@ -9,8 +9,8 @@ import (
"syscall"
"unsafe"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/windows"
"golang.org/x/term"
)
// clearCurrentLine removes all characters from the current line and resets the
@ -74,7 +74,7 @@ func windowsMoveCursorUp(_ io.Writer, fd uintptr, n int) {
// isWindowsTerminal return true if the file descriptor is a windows terminal (cmd, psh).
func isWindowsTerminal(fd uintptr) bool {
return terminal.IsTerminal(int(fd))
return term.IsTerminal(int(fd))
}
func isPipe(fd uintptr) bool {