From dca9b6f5dbd28f9c7e7134e7edd0a8eba0233ee2 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 18 Dec 2020 23:36:45 +0100
Subject: [PATCH] azure: explicitly pass upload size

Previously, the azure library's fallback was to read the whole blob into
memory just to determine the upload size. Wrap the reader in a small
adapter that reports the size already known to the RewindReader via the
Len() method that CreateBlockBlobFromReader checks for.
---
 internal/backend/azure/azure.go | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go
index 33162c227..53064702f 100644
--- a/internal/backend/azure/azure.go
+++ b/internal/backend/azure/azure.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/base64"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
@@ -118,6 +117,16 @@ func (be *Backend) Path() string {
 	return be.prefix
 }
 
+type azureAdapter struct {
+	restic.RewindReader
+}
+
+func (azureAdapter) Close() error { return nil }
+
+func (a azureAdapter) Len() int {
+	return int(a.Length())
+}
+
 // Save stores data in the backend at the handle.
 func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
 	if err := h.Valid(); err != nil {
@@ -135,7 +144,8 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 	var err error
 	if rd.Length() < 256*1024*1024 {
 		// wrap the reader so that net/http client cannot close the reader
-		dataReader := ioutil.NopCloser(rd)
+		// CreateBlockBlobFromReader reads the upload length from Len()
+		dataReader := azureAdapter{rd}
 		// if it's smaller than 256miB, then just create the file directly from the reader
 		err = be.container.GetBlobReference(objName).CreateBlockBlobFromReader(dataReader, nil)
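
Note: the following is a minimal, self-contained Go sketch (not part of the patch)
of the size-detection behaviour the commit message describes: an upload helper
probes its reader for an optional Len() int method and otherwise buffers the whole
payload in memory just to measure it. The identifiers lener, uploadSize,
sizedReader, plainReader and adapter are invented for this sketch and are not the
Azure SDK's or restic's actual internals; adapter mirrors the patch's azureAdapter
and shows why Len() must be in the value's method set when the wrapper is passed
by value.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// lener mirrors the optional interface a size-aware upload helper might probe
// for; the patch comment says CreateBlockBlobFromReader reads the length from
// Len(). The name "lener" is an assumption made for this sketch.
type lener interface {
	Len() int
}

// uploadSize sketches the two paths the commit message describes: use Len()
// when the reader advertises its size, otherwise buffer the whole payload in
// memory just to measure it.
func uploadSize(r io.Reader) (int64, string) {
	if l, ok := r.(lener); ok {
		return int64(l.Len()), "from Len()"
	}
	buf := new(bytes.Buffer)
	n, _ := buf.ReadFrom(r) // fallback: the whole blob ends up in memory
	return n, "buffered in memory"
}

// sizedReader is a minimal stand-in for restic's RewindReader: a reader that
// already knows its total length.
type sizedReader struct {
	io.Reader
	length int64
}

func (s sizedReader) Length() int64 { return s.length }

// adapter plays the role of the patch's azureAdapter: it re-exposes the known
// length through Len(). Note the value receiver; with a pointer receiver,
// passing the adapter by value would hide Len() from the type assertion in
// uploadSize.
type adapter struct {
	sizedReader
}

func (adapter) Close() error { return nil }

func (a adapter) Len() int { return int(a.Length()) }

// plainReader hides any Len() method of the wrapped reader, forcing the
// buffering fallback.
type plainReader struct{ io.Reader }

func main() {
	data := []byte("hello, azure")

	n, how := uploadSize(plainReader{bytes.NewReader(data)})
	fmt.Println(n, "bytes,", how) // 12 bytes, buffered in memory

	rd := sizedReader{Reader: bytes.NewReader(data), length: int64(len(data))}
	n, how = uploadSize(adapter{rd})
	fmt.Println(n, "bytes,", how) // 12 bytes, from Len()
}

Passing plainReader forces the buffering fallback, while adapter{rd} takes the
Len() path; that is the same distinction that makes exposing the size through
the adapter (rather than relying on the in-memory fallback) worthwhile in the
patch above.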