mirror of https://github.com/restic/restic.git
Compare commits
37 Commits
f415b051ab
...
b26d492578
Author | SHA1
---|---
Markus | b26d492578
Michael Eischer | 24c1822220
flow-c | d4477a5a99
Michael Eischer | ffe5439149
Michael Eischer | 676f0dc60d
Michael Eischer | 1e57057953
Michael Eischer | 1ba0af6993
Michael Eischer | ffc41ae62a
Michael Eischer | 4832c2fbfa
dependabot[bot] | 30609ae6b2
dependabot[bot] | 502e5867a5
dependabot[bot] | 18a6d6b408
dependabot[bot] | 3bb88e8307
Michael Eischer | ccac7c7fb3
DRON-666 | ccd35565ee
DRON-666 | 125dba23c5
DRON-666 | 7ee889bb0d
DRON-666 | 90b168eb6c
DRON-666 | 24330c19a8
DRON-666 | 5703e5a652
DRON-666 | 0a8f9c5d9c
DRON-666 | 739d3243d9
DRON-666 | bb0f93ef3d
DRON-666 | 3bac1f0135
DRON-666 | 88c509e3e9
DRON-666 | 9d3d915e2c
DRON-666 | 9182e6bab5
DRON-666 | c4f67c0064
DRON-666 | 7470e5356e
DRON-666 | 78dbc5ec58
Michael Eischer | 20d8eed400
Michael Eischer | cf700d8794
Michael Eischer | 666a0b0bdb
Michael Eischer | 621012dac0
Markus Kramer | 1925457b78
Markus Kramer | d57ef6ec75
Markus Kramer | 2c37fb27ad
@ -261,7 +261,7 @@ jobs:
       uses: actions/checkout@v4

     - name: golangci-lint
-      uses: golangci/golangci-lint-action@v4
+      uses: golangci/golangci-lint-action@v5
       with:
         # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
         version: v1.57.1
@ -0,0 +1,8 @@
Enhancement: Improve reliability of backend operations

Restic now downloads pack files in large chunks instead of using a streaming
download. This prevents failures due to interrupted streams. The `restore`
command now also retries downloading individual blobs that cannot be retrieved.

https://github.com/restic/restic/issues/4627
https://github.com/restic/restic/pull/4605
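The per-blob retry mentioned above can be pictured with a small Go sketch. This is illustrative only and not restic's actual restore code; it just shows the retry pattern, using the github.com/cenkalti/backoff/v4 package already listed in this PR's go.mod further down (the loadBlob callback is hypothetical):

package sketch

import (
	"context"

	"github.com/cenkalti/backoff/v4"
)

// loadBlobWithRetry retries a single blob download a few times with
// exponential backoff instead of failing the whole restore on the first error.
func loadBlobWithRetry(ctx context.Context, loadBlob func(context.Context) ([]byte, error)) ([]byte, error) {
	var buf []byte
	op := func() error {
		var err error
		buf, err = loadBlob(ctx)
		return err
	}
	policy := backoff.WithContext(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5), ctx)
	if err := backoff.Retry(op, policy); err != nil {
		return nil, err
	}
	return buf, nil
}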
@ -0,0 +1,22 @@
Enhancement: Add options to configure Windows Shadow Copy Service

Restic has always used a 120-second timeout and unconditionally created VSS
snapshots for all volume mount points on disk. This behavior can now be
fine-tuned with new options, such as excluding specific volumes and mount
points or completely disabling automatic snapshotting of volume mount points.

For example:

restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true

changes the timeout to five minutes and disables snapshotting of mount points on all volumes, and

restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}"

excludes drive `d:`, mount point `c:\mnt`, and a specific volume from VSS snapshotting.

restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5}

uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider.

https://github.com/restic/restic/pull/3067
@ -445,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o
 }

 func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
-    err := opts.Check(gopts, args)
+    var vsscfg fs.VSSConfig
+    var err error
+
+    if runtime.GOOS == "windows" {
+        if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil {
+            return err
+        }
+    }
+
+    err = opts.Check(gopts, args)
     if err != nil {
         return err
     }

@ -547,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         return err
     }

-    errorHandler := func(item string, err error) error {
-        return progressReporter.Error(item, err)
+    errorHandler := func(item string, err error) {
+        _ = progressReporter.Error(item, err)
     }

     messageHandler := func(msg string, args ...interface{}) {

@ -557,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         }
     }

-    localVss := fs.NewLocalVss(errorHandler, messageHandler)
+    localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg)
     defer localVss.DeleteSnapshots()
     targetFS = localVss
 }
@ -45,6 +45,17 @@ size of the files and directories in ``~/work`` on the local file system. It
also tells us that only 1.200 GiB was added to the repository. This means that
some of the data was duplicate and restic was able to efficiently reduce it.

We just backed up the absolute path ``~/work``, so the path
within the snapshot is ``/home/user/work``.

If we back up the relative path ``work``, the path within the snapshot is ``/work``.

For example, ``restic backup work`` run from ``/home/user`` creates a snapshot
of the directory ``/home/user/work``, which is stored as the path ``/work``
within the snapshot. This path-related discrepancy applies to every command
that accesses data within a snapshot. You can look up the paths within
a repository using the ``ls latest /`` command.
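For example, looking up the paths of a hypothetical repository (illustrative output only; the same format appears in the ``dump`` section later in this document):

.. code-block:: console

    $ restic -r /srv/restic-repo ls latest /
    enter password for repository:
    snapshot 0a1b2c3d of [/home/user/work] filtered by [/] at 2024-04-10 10:00:00 +0200 CEST:
    /work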
If you don't pass the ``--verbose`` option, restic will print less data. You'll
still get a nice live status display. Be aware that the live status shows the
processed files and not the transferred data. Transferred volume might be lower
@ -56,6 +67,39 @@ snapshot for each volume that contains files to backup. Files are read from the
VSS snapshot instead of the regular filesystem. This allows backing up files that are
exclusively locked by another process during the backup.

You can use additional options to change VSS behaviour:

* ``-o vss.timeout`` specifies the timeout for VSS snapshot creation; the default value is 120 seconds
* ``-o vss.exclude-all-mount-points`` disables automatic snapshotting of all volume mount points
* ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting
* ``-o vss.provider`` specifies the VSS provider used for snapshotting

For example, a timeout of two and a half minutes with snapshotting of mount points disabled can be specified as

.. code-block:: console

    -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true

and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as

.. code-block:: console

    -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}"

The VSS provider can be specified by GUID

.. code-block:: console

    -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5}

or by name

.. code-block:: console

    -o vss.provider="Hyper-V IC Software Shadow Copy Provider"

``MS`` can also be used as an alias for ``Microsoft Software Shadow Copy provider 1.0``.

By default VSS ignores Outlook OST files. This is not a restriction of restic
but the default Windows VSS configuration. The files not to snapshot are
configured in the Windows registry under the following key:
@ -34,6 +34,9 @@ size of the contained files at the time when the snapshot was created.
590c8fc8  2015-05-08 21:47:38  kazik   /srv  580.200MiB
9f0bc19e  2015-05-08 21:46:11  luigi   /srv  572.180MiB

The Directory column shows the backed-up path, which may differ
from the path within the snapshot; see https://restic.readthedocs.io/en/stable/040_backup.html#backing-up for details of this discrepancy.

You can filter the listing by directory path:

.. code-block:: console
@ -156,6 +156,31 @@ e.g.:

    $ restic -r /srv/restic-repo dump --path /production.sql latest production.sql | mysql

This example assumes you ran a backup using an absolute path, which coincides with the
path within the snapshots.
See https://restic.readthedocs.io/en/stable/040_backup.html#backing-up for the difference
between the path given to the ``backup`` command and the paths within the snapshots.

If you ran a backup using the relative path ``work/``, the ``dump`` command would look like:

.. code-block:: console

    $ restic -r /srv/restic-repo dump latest /work/README.md


If ``dump`` results in the error message ``cannot dump file: path "/home" not found in snapshot``,
first double-check that you used the path within the snapshot, using the ``ls latest /`` command,
which for the repository above results in:

.. code-block:: console

    $ restic -r /srv/restic-repo ls latest /
    enter password for repository:
    snapshot 1541acae of [/home/other/work] filtered by [/] at 2023-08-09 04:00:03.533117139 +0200 CEST:
    /work


It is also possible to ``dump`` the contents of a whole folder structure to
stdout. To retain the information about the files and folders, restic will
output the contents in the tar (default) or zip format:
@ -205,7 +205,7 @@ The ``forget`` command accepts the following policy options:
 natural time boundaries and *not* relative to when you run ``forget``. Weeks
 are Monday 00:00 to Sunday 23:59, days 00:00 to 23:59, hours :00 to :59, etc.
 They also only count hours/days/weeks/etc which have one or more snapshots.
-A value of ``-1`` will be interpreted as "forever", i.e. "keep all".
+A value of ``unlimited`` will be interpreted as "forever", i.e. "keep all".
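As an illustration (hypothetical invocation, not part of this diff), keeping every daily snapshot forever while keeping only the last twelve monthly snapshots could be written as:

.. code-block:: console

    $ restic forget --keep-daily unlimited --keep-monthly 12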
.. note:: All duration related options (``--keep-{within-,}*``) ignore snapshots
   with a timestamp in the future (relative to when the ``forget`` command is
go.mod (14 changed lines)

@ -2,9 +2,9 @@ module github.com/restic/restic

 require (
     cloud.google.com/go/storage v1.40.0
-    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0
+    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
     github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
-    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1
+    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
     github.com/Backblaze/blazer v0.6.1
     github.com/anacrolix/fuse v0.2.0
     github.com/cenkalti/backoff/v4 v4.2.1

@ -13,7 +13,7 @@ require (
     github.com/go-ole/go-ole v1.3.0
     github.com/google/go-cmp v0.6.0
     github.com/hashicorp/golang-lru/v2 v2.0.7
-    github.com/klauspost/compress v1.17.7
+    github.com/klauspost/compress v1.17.8
     github.com/minio/minio-go/v7 v7.0.66
     github.com/minio/sha256-simd v1.0.1
     github.com/ncw/swift/v2 v2.0.2

@ -26,12 +26,12 @@ require (
     github.com/spf13/cobra v1.8.0
     github.com/spf13/pflag v1.0.5
     go.uber.org/automaxprocs v1.5.3
-    golang.org/x/crypto v0.21.0
-    golang.org/x/net v0.23.0
+    golang.org/x/crypto v0.22.0
+    golang.org/x/net v0.24.0
     golang.org/x/oauth2 v0.18.0
     golang.org/x/sync v0.6.0
-    golang.org/x/sys v0.18.0
-    golang.org/x/term v0.18.0
+    golang.org/x/sys v0.19.0
+    golang.org/x/term v0.19.0
     golang.org/x/text v0.14.0
     golang.org/x/time v0.5.0
     google.golang.org/api v0.170.0
go.sum (28 changed lines)
@ -9,15 +9,15 @@ cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM=
|
|||
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
|
||||
cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
|
||||
cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s=
|
||||
|
@ -114,8 +114,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
|
|||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
|
@ -206,8 +206,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
|
@ -227,8 +227,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
|
@ -255,14 +255,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
|
|
|
@ -567,7 +567,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r
     hrd := hashing.NewReader(rd, sha256.New())
     bufRd.Reset(hrd)

-    it := repository.NewPackBlobIterator(id, bufRd, 0, blobs, r.Key(), dec)
+    it := repository.NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec)
     for {
         val, err := it.Next()
         if err == repository.ErrPackEOF {

@ -653,11 +653,41 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r
     return nil
 }

+type bufReader struct {
+    rd  *bufio.Reader
+    buf []byte
+}
+
+func newBufReader(rd *bufio.Reader) *bufReader {
+    return &bufReader{
+        rd: rd,
+    }
+}
+
+func (b *bufReader) Discard(n int) (discarded int, err error) {
+    return b.rd.Discard(n)
+}
+
+func (b *bufReader) ReadFull(n int) (buf []byte, err error) {
+    if cap(b.buf) < n {
+        b.buf = make([]byte, n)
+    }
+    b.buf = b.buf[:n]
+
+    _, err = io.ReadFull(b.rd, b.buf)
+    if err != nil {
+        return nil, err
+    }
+    return b.buf, nil
+}
+
 // ReadData loads all data from the repository and checks the integrity.
 func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) {
     c.ReadPacks(ctx, c.packs, nil, errChan)
 }

+const maxStreamBufferSize = 4 * 1024 * 1024
+
 // ReadPacks loads data from specified packs and checks the integrity.
 func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
     defer close(errChan)

@ -675,9 +705,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p
     // run workers
     for i := 0; i < workerCount; i++ {
         g.Go(func() error {
-            // create a buffer that is large enough to be reused by repository.StreamPack
-            // this ensures that we can read the pack header later on
-            bufRd := bufio.NewReaderSize(nil, repository.MaxStreamBufferSize)
+            bufRd := bufio.NewReaderSize(nil, maxStreamBufferSize)
             dec, err := zstd.NewReader(nil)
             if err != nil {
                 panic(dec)
@ -3,41 +3,108 @@ package fs
|
|||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/options"
|
||||
)
|
||||
|
||||
// ErrorHandler is used to report errors via callback
|
||||
type ErrorHandler func(item string, err error) error
|
||||
// VSSConfig holds the extended options of the Windows Volume Shadow Copy Service.
|
||||
type VSSConfig struct {
|
||||
ExcludeAllMountPoints bool `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"`
|
||||
ExcludeVolumes string `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"`
|
||||
Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"`
|
||||
Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
options.Register("vss", VSSConfig{})
|
||||
}
|
||||
}
|
||||
|
||||
// NewVSSConfig returns a new VSSConfig with the default values filled in.
|
||||
func NewVSSConfig() VSSConfig {
|
||||
return VSSConfig{
|
||||
Timeout: time.Second * 120,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseVSSConfig parses a VSS extended options to VSSConfig struct.
|
||||
func ParseVSSConfig(o options.Options) (VSSConfig, error) {
|
||||
cfg := NewVSSConfig()
|
||||
o = o.Extract("vss")
|
||||
if err := o.Apply("vss", &cfg); err != nil {
|
||||
return VSSConfig{}, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
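As a usage sketch (not part of this diff; it only mirrors the runBackup change above and the test cases further down): the ``-o vss.*`` flags end up in an options.Options map, which ParseVSSConfig turns into a VSSConfig for NewLocalVss.

// Sketch only: equivalent of `-o vss.timeout=5m -o vss.exclude-all-mount-points=true -o vss.provider=MS`.
opts := options.Options{
	"vss.timeout":                  "5m",
	"vss.exclude-all-mount-points": "true",
	"vss.provider":                 "MS",
}
cfg, err := fs.ParseVSSConfig(opts)
if err != nil {
	return err
}
// errorHandler and messageHandler as wired up in cmd_backup.go above.
localVss := fs.NewLocalVss(errorHandler, messageHandler, cfg)
defer localVss.DeleteSnapshots()
targetFS = localVss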
|
||||
|
||||
// ErrorHandler is used to report errors via callback.
|
||||
type ErrorHandler func(item string, err error)
|
||||
|
||||
// MessageHandler is used to report errors/messages via callbacks.
|
||||
type MessageHandler func(msg string, args ...interface{})
|
||||
|
||||
// VolumeFilter is used to filter volumes by their mount point or GUID path.
|
||||
type VolumeFilter func(volume string) bool
|
||||
|
||||
// LocalVss is a wrapper around the local file system which uses windows volume
|
||||
// shadow copy service (VSS) in a transparent way.
|
||||
type LocalVss struct {
|
||||
FS
|
||||
snapshots map[string]VssSnapshot
|
||||
failedSnapshots map[string]struct{}
|
||||
mutex sync.RWMutex
|
||||
msgError ErrorHandler
|
||||
msgMessage MessageHandler
|
||||
snapshots map[string]VssSnapshot
|
||||
failedSnapshots map[string]struct{}
|
||||
mutex sync.RWMutex
|
||||
msgError ErrorHandler
|
||||
msgMessage MessageHandler
|
||||
excludeAllMountPoints bool
|
||||
excludeVolumes map[string]struct{}
|
||||
timeout time.Duration
|
||||
provider string
|
||||
}
|
||||
|
||||
// statically ensure that LocalVss implements FS.
|
||||
var _ FS = &LocalVss{}
|
||||
|
||||
// parseMountPoints tries to convert a semicolon-separated list of mount points
// to a map of lowercased volume GUID paths. Mount points already in volume
// GUID path format will be validated and normalized.
|
||||
func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) {
|
||||
if list == "" {
|
||||
return
|
||||
}
|
||||
for _, s := range strings.Split(list, ";") {
|
||||
if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil {
|
||||
msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err))
|
||||
} else {
|
||||
if volumes == nil {
|
||||
volumes = make(map[string]struct{})
|
||||
}
|
||||
volumes[strings.ToLower(v)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// NewLocalVss creates a new wrapper around the windows filesystem using volume
|
||||
// shadow copy service to access locked files.
|
||||
func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss {
|
||||
func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss {
|
||||
return &LocalVss{
|
||||
FS: Local{},
|
||||
snapshots: make(map[string]VssSnapshot),
|
||||
failedSnapshots: make(map[string]struct{}),
|
||||
msgError: msgError,
|
||||
msgMessage: msgMessage,
|
||||
FS: Local{},
|
||||
snapshots: make(map[string]VssSnapshot),
|
||||
failedSnapshots: make(map[string]struct{}),
|
||||
msgError: msgError,
|
||||
msgMessage: msgMessage,
|
||||
excludeAllMountPoints: cfg.ExcludeAllMountPoints,
|
||||
excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError),
|
||||
timeout: cfg.Timeout,
|
||||
provider: cfg.Provider,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -50,7 +117,7 @@ func (fs *LocalVss) DeleteSnapshots() {
|
|||
|
||||
for volumeName, snapshot := range fs.snapshots {
|
||||
if err := snapshot.Delete(); err != nil {
|
||||
_ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
|
||||
fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
|
||||
activeSnapshots[volumeName] = snapshot
|
||||
}
|
||||
}
|
||||
|
@ -78,12 +145,27 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) {
|
|||
return os.Lstat(fs.snapshotPath(name))
|
||||
}
|
||||
|
||||
// isMountPointIncluded reports whether the given mount point has not been excluded by the user.
|
||||
func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool {
|
||||
if fs.excludeVolumes == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
volume, err := GetVolumeNameForVolumeMountPoint(mountPoint)
|
||||
if err != nil {
|
||||
fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err))
|
||||
return true
|
||||
}
|
||||
|
||||
_, ok := fs.excludeVolumes[strings.ToLower(volume)]
|
||||
return !ok
|
||||
}
|
||||
|
||||
// snapshotPath returns the path inside a VSS snapshot if one already exists.
|
||||
// If the path is not yet available as a snapshot, a snapshot is created.
|
||||
// If creation of a snapshot fails the file's original path is returned as
|
||||
// a fallback.
|
||||
func (fs *LocalVss) snapshotPath(path string) string {
|
||||
|
||||
fixPath := fixpath(path)
|
||||
|
||||
if strings.HasPrefix(fixPath, `\\?\UNC\`) {
|
||||
|
@ -114,23 +196,36 @@ func (fs *LocalVss) snapshotPath(path string) string {
|
|||
|
||||
if !snapshotExists && !snapshotFailed {
|
||||
vssVolume := volumeNameLower + string(filepath.Separator)
|
||||
fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume)
|
||||
|
||||
if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil {
|
||||
_ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s",
|
||||
vssVolume, err))
|
||||
if !fs.isMountPointIncluded(vssVolume) {
|
||||
fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume)
|
||||
fs.failedSnapshots[volumeNameLower] = struct{}{}
|
||||
} else {
|
||||
fs.snapshots[volumeNameLower] = snapshot
|
||||
fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume)
|
||||
if len(snapshot.mountPointInfo) > 0 {
|
||||
fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume)
|
||||
for mp, mpInfo := range snapshot.mountPointInfo {
|
||||
info := ""
|
||||
if !mpInfo.IsSnapshotted() {
|
||||
info = " (not snapshotted)"
|
||||
fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume)
|
||||
|
||||
var includeVolume VolumeFilter
|
||||
if !fs.excludeAllMountPoints {
|
||||
includeVolume = func(volume string) bool {
|
||||
return fs.isMountPointIncluded(volume)
|
||||
}
|
||||
}
|
||||
|
||||
if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil {
|
||||
fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s",
|
||||
vssVolume, err))
|
||||
fs.failedSnapshots[volumeNameLower] = struct{}{}
|
||||
} else {
|
||||
fs.snapshots[volumeNameLower] = snapshot
|
||||
fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume)
|
||||
if len(snapshot.mountPointInfo) > 0 {
|
||||
fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume)
|
||||
for mp, mpInfo := range snapshot.mountPointInfo {
|
||||
info := ""
|
||||
if !mpInfo.IsSnapshotted() {
|
||||
info = " (not snapshotted)"
|
||||
}
|
||||
fs.msgMessage(" - %s%s\n", mp, info)
|
||||
}
|
||||
fs.msgMessage(" - %s%s\n", mp, info)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -173,9 +268,8 @@ func (fs *LocalVss) snapshotPath(path string) string {
|
|||
snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(),
|
||||
strings.TrimPrefix(fixPath, volumeName))
|
||||
if snapshotPath == snapshot.GetSnapshotDeviceObject() {
|
||||
snapshotPath = snapshotPath + string(filepath.Separator)
|
||||
snapshotPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
} else {
|
||||
// no snapshot is available for the requested path:
|
||||
// -> try to backup without a snapshot
|
||||
|
|
|
@ -0,0 +1,285 @@
|
|||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ole "github.com/go-ole/go-ole"
|
||||
"github.com/restic/restic/internal/options"
|
||||
)
|
||||
|
||||
func matchStrings(ptrs []string, strs []string) bool {
|
||||
if len(ptrs) != len(strs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, p := range ptrs {
|
||||
if p == "" {
|
||||
return false
|
||||
}
|
||||
matched, err := regexp.MatchString(p, strs[i])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !matched {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func matchMap(strs []string, m map[string]struct{}) bool {
|
||||
if len(strs) != len(m) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, s := range strs {
|
||||
if _, ok := m[s]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func TestVSSConfig(t *testing.T) {
|
||||
type config struct {
|
||||
excludeAllMountPoints bool
|
||||
timeout time.Duration
|
||||
provider string
|
||||
}
|
||||
setTests := []struct {
|
||||
input options.Options
|
||||
output config
|
||||
}{
|
||||
{
|
||||
options.Options{
|
||||
"vss.timeout": "6h38m42s",
|
||||
"vss.provider": "Ms",
|
||||
},
|
||||
config{
|
||||
timeout: 23922000000000,
|
||||
provider: "Ms",
|
||||
},
|
||||
},
|
||||
{
|
||||
options.Options{
|
||||
"vss.exclude-all-mount-points": "t",
|
||||
"vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}",
|
||||
},
|
||||
config{
|
||||
excludeAllMountPoints: true,
|
||||
timeout: 120000000000,
|
||||
provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}",
|
||||
},
|
||||
},
|
||||
{
|
||||
options.Options{
|
||||
"vss.exclude-all-mount-points": "0",
|
||||
"vss.exclude-volumes": "",
|
||||
"vss.timeout": "120s",
|
||||
"vss.provider": "Microsoft Software Shadow Copy provider 1.0",
|
||||
},
|
||||
config{
|
||||
timeout: 120000000000,
|
||||
provider: "Microsoft Software Shadow Copy provider 1.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, test := range setTests {
|
||||
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
|
||||
cfg, err := ParseVSSConfig(test.input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
errorHandler := func(item string, err error) {
|
||||
t.Fatalf("unexpected error (%v)", err)
|
||||
}
|
||||
messageHandler := func(msg string, args ...interface{}) {
|
||||
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
|
||||
}
|
||||
|
||||
dst := NewLocalVss(errorHandler, messageHandler, cfg)
|
||||
|
||||
if dst.excludeAllMountPoints != test.output.excludeAllMountPoints ||
|
||||
dst.excludeVolumes != nil || dst.timeout != test.output.timeout ||
|
||||
dst.provider != test.output.provider {
|
||||
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMountPoints(t *testing.T) {
|
||||
volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`)
|
||||
|
||||
// It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling
// GetVolumeNameForVolumeMountPoint itself, but we have a restricted test environment:
// we cannot manage volumes and can only be sure that the mount point C:\ exists
|
||||
sysVolume, err := GetVolumeNameForVolumeMountPoint("C:")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// We don't know a valid volume GUID path for c:\, but we'll at least check its format
|
||||
if !volumeMatch.MatchString(sysVolume) {
|
||||
t.Fatalf("invalid volume GUID path: %s", sysVolume)
|
||||
}
|
||||
// Changing the case and removing the trailing backslash allows testing
// the equality of different ways of writing a volume name
|
||||
sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1])
|
||||
sysVolumeMatch := strings.ToLower(sysVolume)
|
||||
|
||||
type check struct {
|
||||
volume string
|
||||
result bool
|
||||
}
|
||||
setTests := []struct {
|
||||
input options.Options
|
||||
output []string
|
||||
checks []check
|
||||
errors []string
|
||||
}{
|
||||
{
|
||||
options.Options{
|
||||
"vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated,
|
||||
},
|
||||
[]string{
|
||||
sysVolumeMatch,
|
||||
},
|
||||
[]check{
|
||||
{`c:\`, false},
|
||||
{`c:`, false},
|
||||
{sysVolume, false},
|
||||
{sysVolumeMutated, false},
|
||||
},
|
||||
[]string{},
|
||||
},
|
||||
{
|
||||
options.Options{
|
||||
"vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`,
|
||||
},
|
||||
[]string{
|
||||
sysVolumeMatch,
|
||||
},
|
||||
[]check{
|
||||
{`c:\windows\`, true},
|
||||
{`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true},
|
||||
{`c:`, false},
|
||||
{``, true},
|
||||
},
|
||||
[]string{
|
||||
`failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`,
|
||||
`failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`,
|
||||
`failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
|
||||
`failed to get volume from mount point \[c:\\windows\\\]:.*`,
|
||||
`failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
|
||||
`failed to get volume from mount point \[\]:.*`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range setTests {
|
||||
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
|
||||
cfg, err := ParseVSSConfig(test.input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var log []string
|
||||
errorHandler := func(item string, err error) {
|
||||
log = append(log, strings.TrimSpace(err.Error()))
|
||||
}
|
||||
messageHandler := func(msg string, args ...interface{}) {
|
||||
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
|
||||
}
|
||||
|
||||
dst := NewLocalVss(errorHandler, messageHandler, cfg)
|
||||
|
||||
if !matchMap(test.output, dst.excludeVolumes) {
|
||||
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v",
|
||||
test.output, dst.excludeVolumes)
|
||||
}
|
||||
|
||||
for _, c := range test.checks {
|
||||
if dst.isMountPointIncluded(c.volume) != c.result {
|
||||
t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result)
|
||||
}
|
||||
}
|
||||
|
||||
if !matchStrings(test.errors, log) {
|
||||
t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseProvider(t *testing.T) {
|
||||
msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}")
|
||||
setTests := []struct {
|
||||
provider string
|
||||
id *ole.GUID
|
||||
result string
|
||||
}{
|
||||
{
|
||||
"",
|
||||
ole.IID_NULL,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"mS",
|
||||
msProvider,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"{B5946137-7b9f-4925-Af80-51abD60b20d5}",
|
||||
msProvider,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Microsoft Software Shadow Copy provider 1.0",
|
||||
msProvider,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"{04560982-3d7d-4bbc-84f7-0712f833a28f}",
|
||||
nil,
|
||||
`invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`,
|
||||
},
|
||||
{
|
||||
"non-existent provider",
|
||||
nil,
|
||||
`invalid VSS provider "non-existent provider"`,
|
||||
},
|
||||
}
|
||||
|
||||
_ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
|
||||
for i, test := range setTests {
|
||||
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
|
||||
id, err := getProviderID(test.provider)
|
||||
|
||||
if err != nil && id != nil {
|
||||
t.Fatalf("err!=nil but id=%v", id)
|
||||
}
|
||||
|
||||
if test.result != "" || err != nil {
|
||||
var result string
|
||||
if err != nil {
|
||||
result = err.Error()
|
||||
}
|
||||
if test.result != result || test.result == "" {
|
||||
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result)
|
||||
}
|
||||
} else if !ole.IsEqualGUID(id, test.id) {
|
||||
t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -4,6 +4,8 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
)
|
||||
|
||||
|
@ -31,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error {
|
|||
return errors.New("VSS snapshots are only supported on windows")
|
||||
}
|
||||
|
||||
// GetVolumeNameForVolumeMountPoint adds a trailing backslash to the input parameter
// and calls the equivalent windows api.
|
||||
func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
|
||||
return mountPoint, nil
|
||||
}
|
||||
|
||||
// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
|
||||
// finish within the timeout an error is returned.
|
||||
func NewVssSnapshot(
|
||||
_ string, _ uint, _ ErrorHandler) (VssSnapshot, error) {
|
||||
func NewVssSnapshot(_ string,
|
||||
_ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) {
|
||||
return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows")
|
||||
}
|
||||
|
||||
|
|
|
@ -5,10 +5,12 @@ package fs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
ole "github.com/go-ole/go-ole"
|
||||
|
@ -20,8 +22,10 @@ import (
|
|||
type HRESULT uint
|
||||
|
||||
// HRESULT constant values necessary for using VSS api.
|
||||
//nolint:golint
|
||||
const (
|
||||
S_OK HRESULT = 0x00000000
|
||||
S_FALSE HRESULT = 0x00000001
|
||||
E_ACCESSDENIED HRESULT = 0x80070005
|
||||
E_OUTOFMEMORY HRESULT = 0x8007000E
|
||||
E_INVALIDARG HRESULT = 0x80070057
|
||||
|
@ -255,6 +259,7 @@ type IVssBackupComponents struct {
|
|||
}
|
||||
|
||||
// IVssBackupComponentsVTable is the vtable for IVssBackupComponents.
|
||||
// nolint:structcheck
|
||||
type IVssBackupComponentsVTable struct {
|
||||
ole.IUnknownVtbl
|
||||
getWriterComponentsCount uintptr
|
||||
|
@ -364,7 +369,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync(
|
|||
}
|
||||
|
||||
// IsVolumeSupported calls the equivalent VSS api.
|
||||
func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) {
|
||||
func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) {
|
||||
volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -374,7 +379,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err
|
|||
var result uintptr
|
||||
|
||||
if runtime.GOARCH == "386" {
|
||||
id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL))
|
||||
id := (*[4]uintptr)(unsafe.Pointer(providerID))
|
||||
|
||||
result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7,
|
||||
uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3],
|
||||
|
@ -382,7 +387,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err
|
|||
0)
|
||||
} else {
|
||||
result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4,
|
||||
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)),
|
||||
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)),
|
||||
uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0,
|
||||
0)
|
||||
}
|
||||
|
@ -408,24 +413,24 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) {
|
|||
}
|
||||
|
||||
// AddToSnapshotSet calls the equivalent VSS api.
|
||||
func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error {
|
||||
func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error {
|
||||
volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var result uintptr = 0
|
||||
var result uintptr
|
||||
|
||||
if runtime.GOARCH == "386" {
|
||||
id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL))
|
||||
id := (*[4]uintptr)(unsafe.Pointer(providerID))
|
||||
|
||||
result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7,
|
||||
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1],
|
||||
id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
|
||||
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
|
||||
id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
|
||||
} else {
|
||||
result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4,
|
||||
uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
|
||||
uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
|
||||
uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
|
||||
}
|
||||
|
||||
return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result))
|
||||
|
@ -478,9 +483,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) {
|
|||
|
||||
// DeleteSnapshots calls the equivalent VSS api.
|
||||
func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) {
|
||||
var deletedSnapshots int32 = 0
|
||||
var deletedSnapshots int32
|
||||
var nondeletedSnapshotID ole.GUID
|
||||
var result uintptr = 0
|
||||
var result uintptr
|
||||
|
||||
if runtime.GOARCH == "386" {
|
||||
id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
|
||||
|
@ -504,7 +509,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol
|
|||
// GetSnapshotProperties calls the equivalent VSS api.
|
||||
func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID,
|
||||
properties *VssSnapshotProperties) error {
|
||||
var result uintptr = 0
|
||||
var result uintptr
|
||||
|
||||
if runtime.GOARCH == "386" {
|
||||
id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
|
||||
|
@ -527,8 +532,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proc.Call(uintptr(unsafe.Pointer(properties)))
|
||||
// this function always succeeds and returns no value
|
||||
_, _, _ = proc.Call(uintptr(unsafe.Pointer(properties)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -543,6 +548,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) {
|
|||
}
|
||||
|
||||
// VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api.
|
||||
// nolint:structcheck
|
||||
type VssSnapshotProperties struct {
|
||||
snapshotID ole.GUID
|
||||
snapshotSetID ole.GUID
|
||||
|
@ -559,6 +565,24 @@ type VssSnapshotProperties struct {
|
|||
status uint
|
||||
}
|
||||
|
||||
// VssProviderProperties defines the properties of a VSS provider as part of the VSS api.
|
||||
// nolint:structcheck
|
||||
type VssProviderProperties struct {
|
||||
providerID ole.GUID
|
||||
providerName *uint16
|
||||
providerType uint32
|
||||
providerVersion *uint16
|
||||
providerVersionID ole.GUID
|
||||
classID ole.GUID
|
||||
}
|
||||
|
||||
func vssFreeProviderProperties(p *VssProviderProperties) {
|
||||
ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName)))
|
||||
p.providerName = nil
|
||||
ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion)))
|
||||
p.providerVersion = nil
|
||||
}
|
||||
|
||||
// GetSnapshotDeviceObject returns root path to access the snapshot files
|
||||
// and folders.
|
||||
func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string {
|
||||
|
@ -617,8 +641,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) {
|
|||
|
||||
// WaitUntilAsyncFinished waits until either the async call is finished or
|
||||
// the given timeout is reached.
|
||||
func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error {
|
||||
hresult := vssAsync.Wait(millis)
|
||||
func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error {
|
||||
const maxTimeout = math.MaxInt32 * time.Millisecond
|
||||
if timeout > maxTimeout {
|
||||
timeout = maxTimeout
|
||||
}
|
||||
|
||||
hresult := vssAsync.Wait(uint32(timeout.Milliseconds()))
|
||||
err := newVssErrorIfResultNotOK("Wait() failed", hresult)
|
||||
if err != nil {
|
||||
vssAsync.Cancel()
|
||||
|
@ -651,6 +680,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin.
|
||||
var (
|
||||
UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}")
|
||||
CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}")
|
||||
)
|
||||
|
||||
// IVSSAdmin VSS api interface.
|
||||
type IVSSAdmin struct {
|
||||
ole.IUnknown
|
||||
}
|
||||
|
||||
// IVSSAdminVTable is the vtable for IVSSAdmin.
|
||||
// nolint:structcheck
|
||||
type IVSSAdminVTable struct {
|
||||
ole.IUnknownVtbl
|
||||
registerProvider uintptr
|
||||
unregisterProvider uintptr
|
||||
queryProviders uintptr
|
||||
abortAllSnapshotsInProgress uintptr
|
||||
}
|
||||
|
||||
// getVTable returns the vtable for IVSSAdmin.
|
||||
func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable {
|
||||
return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable))
|
||||
}
|
||||
|
||||
// QueryProviders calls the equivalent VSS api.
|
||||
func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) {
|
||||
var enum *IVssEnumObject
|
||||
|
||||
result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2,
|
||||
uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0)
|
||||
|
||||
return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result))
|
||||
}
|
||||
|
||||
// IVssEnumObject VSS api interface.
|
||||
type IVssEnumObject struct {
|
||||
ole.IUnknown
|
||||
}
|
||||
|
||||
// IVssEnumObjectVTable is the vtable for IVssEnumObject.
|
||||
// nolint:structcheck
|
||||
type IVssEnumObjectVTable struct {
|
||||
ole.IUnknownVtbl
|
||||
next uintptr
|
||||
skip uintptr
|
||||
reset uintptr
|
||||
clone uintptr
|
||||
}
|
||||
|
||||
// getVTable returns the vtable for IVssEnumObject.
|
||||
func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable {
|
||||
return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable))
|
||||
}
|
||||
|
||||
// Next calls the equivalent VSS api.
|
||||
func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) {
|
||||
var fetched uint32
|
||||
result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4,
|
||||
uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props),
|
||||
uintptr(unsafe.Pointer(&fetched)), 0, 0)
|
||||
if HRESULT(result) == S_FALSE {
|
||||
return uint(fetched), nil
|
||||
}
|
||||
|
||||
return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result))
|
||||
}
|
||||
|
||||
// MountPoint wraps all information of a snapshot of a mountpoint on a volume.
|
||||
type MountPoint struct {
|
||||
isSnapshotted bool
|
||||
|
@ -677,7 +775,7 @@ type VssSnapshot struct {
|
|||
snapshotProperties VssSnapshotProperties
|
||||
snapshotDeviceObject string
|
||||
mountPointInfo map[string]MountPoint
|
||||
timeoutInMillis uint32
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// GetSnapshotDeviceObject returns root path to access the snapshot files
|
||||
|
@ -694,7 +792,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) {
|
|||
}
|
||||
|
||||
// ensure COM is initialized before use
|
||||
ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
|
||||
// CoInitializeEx returns S_FALSE if COM is already initialized
|
||||
if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var oleIUnknown *ole.IUnknown
|
||||
result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown)))
|
||||
|
@ -727,12 +830,34 @@ func HasSufficientPrivilegesForVSS() error {
|
|||
return err
|
||||
}
|
||||
|
||||
// GetVolumeNameForVolumeMountPoint adds a trailing backslash to the input parameter
// and calls the equivalent windows api.
|
||||
func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
|
||||
if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator {
|
||||
mountPoint += string(filepath.Separator)
|
||||
}
|
||||
|
||||
mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint)
|
||||
if err != nil {
|
||||
return mountPoint, err
|
||||
}
|
||||
|
||||
// A reasonable size for the buffer to accommodate the largest possible
|
||||
// volume GUID path is 50 characters.
|
||||
volumeNameBuffer := make([]uint16, 50)
|
||||
if err := windows.GetVolumeNameForVolumeMountPoint(
|
||||
mountPointPointer, &volumeNameBuffer[0], 50); err != nil {
|
||||
return mountPoint, err
|
||||
}
|
||||
|
||||
return syscall.UTF16ToString(volumeNameBuffer), nil
|
||||
}
|
||||
|
||||
// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
|
||||
// finish within the timeout an error is returned.
|
||||
func NewVssSnapshot(
|
||||
volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) {
|
||||
func NewVssSnapshot(provider string,
|
||||
volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) {
|
||||
is64Bit, err := isRunningOn64BitWindows()
|
||||
|
||||
if err != nil {
|
||||
return VssSnapshot{}, newVssTextError(fmt.Sprintf(
|
||||
"Failed to detect windows architecture: %s", err.Error()))
|
||||
|
@ -744,7 +869,7 @@ func NewVssSnapshot(
|
|||
runtime.GOARCH))
|
||||
}
|
||||
|
||||
timeoutInMillis := uint32(timeoutInSeconds * 1000)
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
oleIUnknown, err := initializeVssCOMInterface()
|
||||
if oleIUnknown != nil {
|
||||
|
@ -778,6 +903,12 @@ func NewVssSnapshot(
|
|||
|
||||
iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface))
|
||||
|
||||
providerID, err := getProviderID(provider)
|
||||
if err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
}
|
||||
|
||||
if err := iVssBackupComponents.InitializeForBackup(); err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
|
@ -796,13 +927,13 @@ func NewVssSnapshot(
|
|||
}
|
||||
|
||||
err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata,
|
||||
"GatherWriterMetadata", timeoutInMillis)
|
||||
"GatherWriterMetadata", deadline)
|
||||
if err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
}
|
||||
|
||||
if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil {
|
||||
if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
} else if !isSupported {
|
||||
|
@ -817,44 +948,53 @@ func NewVssSnapshot(
|
|||
return VssSnapshot{}, err
|
||||
}
|
||||
|
||||
if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil {
|
||||
if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
}
|
||||
|
||||
mountPoints, err := enumerateMountedFolders(volume)
|
||||
if err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, newVssTextError(fmt.Sprintf(
|
||||
"failed to enumerate mount points for volume %s: %s", volume, err))
|
||||
}
|
||||
|
||||
mountPointInfo := make(map[string]MountPoint)
|
||||
|
||||
for _, mountPoint := range mountPoints {
|
||||
// ensure every mountpoint is available even without a valid
|
||||
// snapshot because we need to consider this when backing up files
|
||||
mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false}
|
||||
|
||||
if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil {
|
||||
continue
|
||||
} else if !isSupported {
|
||||
continue
|
||||
}
|
||||
|
||||
var mountPointSnapshotSetID ole.GUID
|
||||
err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID)
|
||||
// if filter==nil just don't process mount points for this volume at all
|
||||
if filter != nil {
|
||||
mountPoints, err := enumerateMountedFolders(volume)
|
||||
if err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
|
||||
return VssSnapshot{}, newVssTextError(fmt.Sprintf(
|
||||
"failed to enumerate mount points for volume %s: %s", volume, err))
|
||||
}
|
||||
|
||||
mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true,
|
||||
snapshotSetID: mountPointSnapshotSetID}
|
||||
for _, mountPoint := range mountPoints {
|
||||
// ensure every mountpoint is available even without a valid
|
||||
// snapshot because we need to consider this when backing up files
|
||||
mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false}
|
||||
|
||||
if !filter(mountPoint) {
|
||||
continue
|
||||
} else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil {
|
||||
continue
|
||||
} else if !isSupported {
|
||||
continue
|
||||
}
|
||||
|
||||
var mountPointSnapshotSetID ole.GUID
|
||||
err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID)
|
||||
if err != nil {
|
||||
iVssBackupComponents.Release()
|
||||
|
||||
return VssSnapshot{}, err
|
||||
}
|
||||
|
||||
mountPointInfo[mountPoint] = MountPoint{
|
||||
isSnapshotted: true,
|
||||
snapshotSetID: mountPointSnapshotSetID,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup",
|
||||
timeoutInMillis)
|
||||
deadline)
|
||||
if err != nil {
|
||||
// After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS
|
||||
// instance for proper cleanup.
|
||||
|
@ -865,9 +1005,9 @@ func NewVssSnapshot(
|
|||
}
|
||||
|
||||
err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet",
|
||||
timeoutInMillis)
|
||||
deadline)
|
||||
if err != nil {
|
||||
iVssBackupComponents.AbortBackup()
|
||||
_ = iVssBackupComponents.AbortBackup()
|
||||
iVssBackupComponents.Release()
|
||||
return VssSnapshot{}, err
|
||||
}
|
||||
|
@ -875,13 +1015,12 @@ func NewVssSnapshot(
var snapshotProperties VssSnapshotProperties
err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties)
if err != nil {
iVssBackupComponents.AbortBackup()
_ = iVssBackupComponents.AbortBackup()
iVssBackupComponents.Release()
return VssSnapshot{}, err
}

for mountPoint, info := range mountPointInfo {

if !info.isSnapshotted {
continue
}
@ -900,8 +1039,10 @@ func NewVssSnapshot(
mountPointInfo[mountPoint] = info
}

return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties,
snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil
return VssSnapshot{
iVssBackupComponents, snapshotSetID, snapshotProperties,
snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline),
}, nil
}

// Delete deletes the created snapshot.
@ -922,15 +1063,17 @@ func (p *VssSnapshot) Delete() error {
if p.iVssBackupComponents != nil {
defer p.iVssBackupComponents.Release()

deadline := time.Now().Add(p.timeout)

err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete",
p.timeoutInMillis)
deadline)
if err != nil {
return err
}

if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil {
err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error()))
p.iVssBackupComponents.AbortBackup()
_ = p.iVssBackupComponents.AbortBackup()
if err != nil {
return err
}
@ -940,12 +1083,61 @@ func (p *VssSnapshot) Delete() error {
return nil
}

func getProviderID(provider string) (*ole.GUID, error) {
providerLower := strings.ToLower(provider)
switch providerLower {
case "":
return ole.IID_NULL, nil
case "ms":
return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil
}

comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN)
if err != nil {
return nil, err
}
defer comInterface.Release()

vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface))

enum, err := vssAdmin.QueryProviders()
if err != nil {
return nil, err
}
defer enum.Release()

id := ole.NewGUID(provider)

var props struct {
objectType uint32
provider VssProviderProperties
}
for {
count, err := enum.Next(1, unsafe.Pointer(&props))
if err != nil {
return nil, err
}

if count < 1 {
return nil, errors.Errorf(`invalid VSS provider "%s"`, provider)
}

name := ole.UTF16PtrToString(props.provider.providerName)
vssFreeProviderProperties(&props.provider)

if id != nil && *id == props.provider.providerID ||
id == nil && providerLower == strings.ToLower(name) {
return &props.provider.providerID, nil
}
}
}

// asyncCallFunc is the callback type for callAsyncFunctionAndWait.
type asyncCallFunc func() (*IVSSAsync, error)

// callAsyncFunctionAndWait calls an async functions and waits for it to either
// finish or timeout.
func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error {
func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error {
iVssAsync, err := function()
if err != nil {
return err
@ -955,7 +1147,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill
return newVssTextError(fmt.Sprintf("%s() returned nil", name))
}

err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis)
timeout := time.Until(deadline)
if timeout <= 0 {
return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name))
}

err = iVssAsync.WaitUntilAsyncFinished(timeout)
iVssAsync.Release()
return err
}
@ -1036,6 +1233,7 @@ func enumerateMountedFolders(volume string) ([]string, error) {
return mountedFolders, nil
}

// nolint:errcheck
defer windows.FindVolumeMountPointClose(handle)

volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer)
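The VSS hunks above replace the fixed per-call timeout with one deadline shared by PrepareForBackup, DoSnapshotSet and BackupComplete: each call only gets whatever time is left. A minimal Go sketch of that shared-deadline pattern, using a hypothetical waitStep helper rather than restic's callAsyncFunctionAndWait:

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitStep waits for done or for the time remaining until the shared
    // deadline, whichever comes first. Every step draws from the same budget.
    func waitStep(name string, done <-chan struct{}, deadline time.Time) error {
    	remaining := time.Until(deadline)
    	if remaining <= 0 {
    		return fmt.Errorf("%s: deadline exceeded", name)
    	}
    	select {
    	case <-done:
    		return nil
    	case <-time.After(remaining):
    		return fmt.Errorf("%s: timed out", name)
    	}
    }

    func main() {
    	deadline := time.Now().Add(2 * time.Second) // deadline derived from the configured VSS timeout
    	step := func(d time.Duration) <-chan struct{} {
    		ch := make(chan struct{})
    		go func() { time.Sleep(d); close(ch) }()
    		return ch
    	}
    	fmt.Println(waitStep("PrepareForBackup", step(300*time.Millisecond), deadline))
    	fmt.Println(waitStep("DoSnapshotSet", step(300*time.Millisecond), deadline))
    }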
@ -79,13 +79,8 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
for t := range downloadQueue {
err := repo.LoadBlobsFromPack(wgCtx, t.PackID, t.Blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
var ierr error
// check whether we can get a valid copy somewhere else
buf, ierr = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil)
if ierr != nil {
// no luck, return the original error
return err
}
// a required blob couldn't be retrieved
return err
}

keepMutex.Lock()
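The repack hunk above removes the inline per-blob retry; the fallback now lives in LoadBlobsFromPack (see the repository.go hunks below). A rough sketch of that try-the-pack-first, then-individual-blobs flow, with hypothetical loader callbacks instead of the real restic types:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // loadWithFallback tries the bulk pack download first and, if that fails,
    // retries each required blob on its own.
    func loadWithFallback(loadPack func() (map[string][]byte, error),
    	loadBlob func(id string) ([]byte, error), ids []string) (map[string][]byte, error) {

    	blobs, err := loadPack()
    	if err == nil {
    		return blobs, nil
    	}
    	// bulk download failed: fall back to per-blob loads
    	blobs = make(map[string][]byte)
    	for _, id := range ids {
    		buf, berr := loadBlob(id)
    		if berr != nil {
    			return nil, errors.Join(err, berr)
    		}
    		blobs[id] = buf
    	}
    	return blobs, nil
    }

    func main() {
    	failPack := func() (map[string][]byte, error) { return nil, errors.New("stream interrupted") }
    	loadBlob := func(id string) ([]byte, error) { return []byte("data-" + id), nil }
    	blobs, err := loadWithFallback(failPack, loadBlob, []string{"a", "b"})
    	fmt.Println(blobs, err)
    }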
@ -1,7 +1,6 @@
package repository

import (
"bufio"
"bytes"
"context"
"fmt"
@ -12,7 +11,6 @@ import (
"sort"
"sync"

"github.com/cenkalti/backoff/v4"
"github.com/klauspost/compress/zstd"
"github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
@ -29,8 +27,6 @@ import (
"golang.org/x/sync/errgroup"
)

const MaxStreamBufferSize = 4 * 1024 * 1024

const MinPackSize = 4 * 1024 * 1024
const DefaultPackSize = 16 * 1024 * 1024
const MaxPackSize = 128 * 1024 * 1024
@ -966,19 +962,21 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte
}

type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error
type loadBlobFn func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error)

// Skip sections with more than 4MB unused blobs
const maxUnusedRange = 4 * 1024 * 1024
// Skip sections with more than 1MB unused blobs
const maxUnusedRange = 1 * 1024 * 1024

// LoadBlobsFromPack loads the listed blobs from the specified pack file. The plaintext blob is passed to
// the handleBlobFn callback or an error if decryption failed or the blob hash does not match.
// handleBlobFn is called at most once for each blob. If the callback returns an error,
// then LoadBlobsFromPack will abort and not retry it.
// then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within
// this specific call. The callback must not keep a reference to buf.
func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
return streamPack(ctx, r.Backend().Load, r.key, packID, blobs, handleBlobFn)
return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn)
}

func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
if len(blobs) == 0 {
// nothing to do
return nil
@ -990,14 +988,29 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack

lowerIdx := 0
lastPos := blobs[0].Offset
const maxChunkSize = 2 * DefaultPackSize

for i := 0; i < len(blobs); i++ {
if blobs[i].Offset < lastPos {
// don't wait for streamPackPart to fail
return errors.Errorf("overlapping blobs in pack %v", packID)
}

chunkSizeAfter := (blobs[i].Offset + blobs[i].Length) - blobs[lowerIdx].Offset
split := false
// split if the chunk would become larger than maxChunkSize. Oversized chunks are
// handled by the requirement that the chunk contains at least one blob (i > lowerIdx)
if i > lowerIdx && chunkSizeAfter >= maxChunkSize {
split = true
}
// skip too large gaps as a new request is typically much cheaper than data transfers
if blobs[i].Offset-lastPos > maxUnusedRange {
split = true
}

if split {
// load everything up to the skipped file section
err := streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:i], handleBlobFn)
err := streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:i], handleBlobFn)
if err != nil {
return err
}
@ -1006,10 +1019,10 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack
lastPos = blobs[i].Offset + blobs[i].Length
}
// load remainder
return streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:], handleBlobFn)
return streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:], handleBlobFn)
}

func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false}

dataStart := blobs[0].Offset
@ -1017,57 +1030,108 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key,

debug.Log("streaming pack %v (%d to %d bytes), blobs: %v", packID, dataStart, dataEnd, len(blobs))

dec, err := zstd.NewReader(nil)
if err != nil {
panic(dec)
}
defer dec.Close()

ctx, cancel := context.WithCancel(ctx)
// stream blobs in pack
err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error {
// prevent callbacks after cancellation
if ctx.Err() != nil {
return ctx.Err()
}
bufferSize := int(dataEnd - dataStart)
if bufferSize > MaxStreamBufferSize {
bufferSize = MaxStreamBufferSize
}
bufRd := bufio.NewReaderSize(rd, bufferSize)
it := NewPackBlobIterator(packID, bufRd, dataStart, blobs, key, dec)

for {
val, err := it.Next()
if err == ErrPackEOF {
break
} else if err != nil {
return err
}

err = handleBlobFn(val.Handle, val.Plaintext, val.Err)
if err != nil {
cancel()
return backoff.Permanent(err)
}
// ensure that each blob is only passed once to handleBlobFn
blobs = blobs[1:]
}
return nil
data := make([]byte, int(dataEnd-dataStart))
err := beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error {
_, cerr := io.ReadFull(rd, data)
return cerr
})
// prevent callbacks after cancellation
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil {
// the context is only still valid if handleBlobFn never returned an error
if loadBlobFn != nil {
// check whether we can get the remaining blobs somewhere else
for _, entry := range blobs {
buf, ierr := loadBlobFn(ctx, entry.Type, entry.ID, nil)
err = handleBlobFn(entry.BlobHandle, buf, ierr)
if err != nil {
break
}
}
}
return errors.Wrap(err, "StreamPack")
}

it := NewPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec)

for {
val, err := it.Next()
if err == ErrPackEOF {
break
} else if err != nil {
return err
}

if val.Err != nil && loadBlobFn != nil {
var ierr error
// check whether we can get a valid copy somewhere else
buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil)
if ierr == nil {
// success
val.Plaintext = buf
val.Err = nil
}
}

err = handleBlobFn(val.Handle, val.Plaintext, val.Err)
if err != nil {
return err
}
// ensure that each blob is only passed once to handleBlobFn
blobs = blobs[1:]
}

return errors.Wrap(err, "StreamPack")
}

// discardReader allows the PackBlobIterator to perform zero copy
// reads if the underlying data source is a byte slice.
type discardReader interface {
Discard(n int) (discarded int, err error)
// ReadFull reads the next n bytes into a byte slice. The caller must not
// retain a reference to the byte. Modifications are only allowed within
// the boundaries of the returned slice.
ReadFull(n int) (buf []byte, err error)
}

type byteReader struct {
buf []byte
}

func newByteReader(buf []byte) *byteReader {
return &byteReader{
buf: buf,
}
}

func (b *byteReader) Discard(n int) (discarded int, err error) {
if len(b.buf) < n {
return 0, io.ErrUnexpectedEOF
}
b.buf = b.buf[n:]
return n, nil
}

func (b *byteReader) ReadFull(n int) (buf []byte, err error) {
if len(b.buf) < n {
return nil, io.ErrUnexpectedEOF
}
buf = b.buf[:n]
b.buf = b.buf[n:]
return buf, nil
}

type PackBlobIterator struct {
packID restic.ID
rd *bufio.Reader
rd discardReader
currentOffset uint

blobs []restic.Blob
key *crypto.Key
dec *zstd.Decoder

buf []byte
decode []byte
}
@ -1079,7 +1143,7 @@ type PackBlobValue struct {

var ErrPackEOF = errors.New("reached EOF of pack file")

func NewPackBlobIterator(packID restic.ID, rd *bufio.Reader, currentOffset uint,
func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint,
blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator {
return &PackBlobIterator{
packID: packID,
@ -1114,21 +1178,12 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
debug.Log(" process blob %v, skipped %d, %v", h, skipBytes, entry)

if uint(cap(b.buf)) < entry.Length {
b.buf = make([]byte, entry.Length)
}
b.buf = b.buf[:entry.Length]

n, err := io.ReadFull(b.rd, b.buf)
buf, err := b.rd.ReadFull(int(entry.Length))
if err != nil {
debug.Log(" read error %v", err)
return PackBlobValue{}, fmt.Errorf("readFull: %w", err)
}

if n != len(b.buf) {
return PackBlobValue{}, fmt.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
h, b.packID.Str(), len(b.buf), n)
}
b.currentOffset = entry.Offset + entry.Length

if int(entry.Length) <= b.key.NonceSize() {
@ -1137,7 +1192,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
}

// decryption errors are likely permanent, give the caller a chance to skip them
nonce, ciphertext := b.buf[:b.key.NonceSize()], b.buf[b.key.NonceSize():]
nonce, ciphertext := buf[:b.key.NonceSize()], buf[b.key.NonceSize():]
plaintext, err := b.key.Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
err = fmt.Errorf("decrypting blob %v from %v failed: %w", h, b.packID.Str(), err)
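The discardReader introduced above lets PackBlobIterator take sub-slices of the downloaded pack buffer instead of copying out of a bufio.Reader. A small standalone sketch of that aliasing behaviour, with a simplified sliceReader type that is not the restic implementation:

    package main

    import "fmt"

    // sliceReader hands out views into one backing buffer; callers must copy
    // a returned slice if they need it after the next read.
    type sliceReader struct{ buf []byte }

    func (r *sliceReader) ReadFull(n int) ([]byte, error) {
    	if len(r.buf) < n {
    		return nil, fmt.Errorf("need %d bytes, have %d", n, len(r.buf))
    	}
    	out := r.buf[:n]
    	r.buf = r.buf[n:]
    	return out, nil
    }

    func main() {
    	r := &sliceReader{buf: []byte("noncecipher")}
    	nonce, _ := r.ReadFull(5) // view of the first 5 bytes, no copy
    	rest, _ := r.ReadFull(6)  // view of the remainder
    	fmt.Println(string(nonce), string(rest))
    }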
@ -146,14 +146,14 @@ func TestStreamPack(t *testing.T) {
}

func testStreamPack(t *testing.T, version uint) {
// always use the same key for deterministic output
const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}`

var key crypto.Key
err := json.Unmarshal([]byte(jsonKey), &key)
dec, err := zstd.NewReader(nil)
if err != nil {
t.Fatal(err)
panic(dec)
}
defer dec.Close()

// always use the same key for deterministic output
key := testKey(t)

blobSizes := []int{
5522811,
@ -276,7 +276,7 @@ func testStreamPack(t *testing.T, version uint) {

loadCalls = 0
shortFirstLoad = test.shortFirstLoad
err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
if err != nil {
t.Fatal(err)
}
@ -339,7 +339,7 @@ func testStreamPack(t *testing.T, version uint) {
return err
}

err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
if err == nil {
t.Fatalf("wanted error %v, got nil", test.err)
}
@ -449,3 +449,83 @@ func TestUnpackedVerification(t *testing.T) {
}
}
}

func testKey(t *testing.T) crypto.Key {
const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}`

var key crypto.Key
err := json.Unmarshal([]byte(jsonKey), &key)
if err != nil {
t.Fatal(err)
}
return key
}

func TestStreamPackFallback(t *testing.T) {
dec, err := zstd.NewReader(nil)
if err != nil {
panic(dec)
}
defer dec.Close()

test := func(t *testing.T, failLoad bool) {
key := testKey(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

plaintext := rtest.Random(800, 42)
blobID := restic.Hash(plaintext)
blobs := []restic.Blob{
{
Length: uint(crypto.CiphertextLength(len(plaintext))),
Offset: 0,
BlobHandle: restic.BlobHandle{
ID: blobID,
Type: restic.DataBlob,
},
},
}

var loadPack backendLoadFn
if failLoad {
loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
return errors.New("load error")
}
} else {
loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
// just return an empty array to provoke an error
data := make([]byte, length)
return fn(bytes.NewReader(data))
}
}

loadBlob := func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
if id == blobID {
return plaintext, nil
}
return nil, errors.New("unknown blob")
}

blobOK := false
handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error {
rtest.OK(t, err)
rtest.Equals(t, blobID, blob.ID)
rtest.Equals(t, plaintext, buf)
blobOK = true
return err
}

err := streamPack(ctx, loadPack, loadBlob, dec, &key, restic.ID{}, blobs, handleBlob)
rtest.OK(t, err)
rtest.Assert(t, blobOK, "blob failed to load")
}

t.Run("corrupted blob", func(t *testing.T) {
test(t, false)
})

// test fallback for failed pack loading
t.Run("failed load", func(t *testing.T) {
test(t, true)
})
}