Merge pull request #2850 from greatroar/packer-malloc

Decrease allocation rate in internal/pack
Alexander Neumann 2020-11-15 17:19:49 +01:00 committed by GitHub
commit 9968220652
4 changed files with 88 additions and 60 deletions

View File

@@ -1,7 +1,6 @@
 package pack
 
 import (
-	"bytes"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -49,7 +48,8 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
 
 var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
 
-// headerEntry is used with encoding/binary to read and write header entries
+// headerEntry describes the format of header entries. It serves only as
+// documentation.
 type headerEntry struct {
 	Type   uint8
 	Length uint32
@@ -64,16 +64,15 @@ func (p *Packer) Finalize() (uint, error) {
 
 	bytesWritten := p.bytes
 
-	hdrBuf := bytes.NewBuffer(nil)
-	bytesHeader, err := p.writeHeader(hdrBuf)
+	header, err := p.makeHeader()
 	if err != nil {
 		return 0, err
 	}
 
-	encryptedHeader := make([]byte, 0, hdrBuf.Len()+p.k.Overhead()+p.k.NonceSize())
+	encryptedHeader := make([]byte, 0, len(header)+p.k.Overhead()+p.k.NonceSize())
 	nonce := crypto.NewRandomNonce()
 	encryptedHeader = append(encryptedHeader, nonce...)
-	encryptedHeader = p.k.Seal(encryptedHeader, nonce, hdrBuf.Bytes(), nil)
+	encryptedHeader = p.k.Seal(encryptedHeader, nonce, header, nil)
 
 	// append the header
 	n, err := p.wr.Write(encryptedHeader)
@@ -81,7 +80,7 @@ func (p *Packer) Finalize() (uint, error) {
 		return 0, errors.Wrap(err, "Write")
 	}
 
-	hdrBytes := restic.CiphertextLength(int(bytesHeader))
+	hdrBytes := restic.CiphertextLength(len(header))
 	if n != hdrBytes {
 		return 0, errors.New("wrong number of bytes written")
 	}
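
The rewritten Finalize sizes the destination slice once, for the header plus nonce and AEAD overhead, and lets Seal append into it, so nonce and ciphertext end up in a single allocation. Below is a minimal stand-alone sketch of that pattern, not part of this commit, using the standard library's AES-GCM in place of restic's crypto.Key; sealWithNonce is a hypothetical helper name.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

// sealWithNonce encrypts plaintext and returns nonce||ciphertext. The
// destination slice is sized up front, so Seal appends into it without a
// second allocation, which is the same trick Finalize uses above.
func sealWithNonce(aead cipher.AEAD, plaintext []byte) ([]byte, error) {
	buf := make([]byte, 0, aead.NonceSize()+len(plaintext)+aead.Overhead())

	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}

	buf = append(buf, nonce...)
	return aead.Seal(buf, nonce, plaintext, nil), nil
}

func main() {
	block, err := aes.NewCipher(make([]byte, 32))
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	out, err := sealWithNonce(aead, []byte("pack header"))
	fmt.Println(len(out), err) // nonce (12) + plaintext (11) + tag (16) = 39 <nil>
}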
@@ -99,32 +98,27 @@ func (p *Packer) Finalize() (uint, error) {
 	return bytesWritten, nil
 }
 
-// writeHeader constructs and writes the header to wr.
-func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
-	for _, b := range p.blobs {
-		entry := headerEntry{
-			Length: uint32(b.Length),
-			ID:     b.ID,
-		}
+// makeHeader constructs the header for p.
+func (p *Packer) makeHeader() ([]byte, error) {
+	buf := make([]byte, 0, len(p.blobs)*int(entrySize))
 
+	for _, b := range p.blobs {
 		switch b.Type {
 		case restic.DataBlob:
-			entry.Type = 0
+			buf = append(buf, 0)
 		case restic.TreeBlob:
-			entry.Type = 1
+			buf = append(buf, 1)
 		default:
-			return 0, errors.Errorf("invalid blob type %v", b.Type)
+			return nil, errors.Errorf("invalid blob type %v", b.Type)
 		}
 
-		err := binary.Write(wr, binary.LittleEndian, entry)
-		if err != nil {
-			return bytesWritten, errors.Wrap(err, "binary.Write")
-		}
-
-		bytesWritten += entrySize
+		var lenLE [4]byte
+		binary.LittleEndian.PutUint32(lenLE[:], uint32(b.Length))
+		buf = append(buf, lenLE[:]...)
+		buf = append(buf, b.ID[:]...)
 	}
 
-	return
+	return buf, nil
 }
 
 // Size returns the number of bytes written so far.
@@ -151,11 +145,6 @@ func (p *Packer) Blobs() []restic.Blob {
 	return p.blobs
 }
 
-// Writer return the underlying writer.
-func (p *Packer) Writer() io.Writer {
-	return p.wr
-}
-
 func (p *Packer) String() string {
 	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
 }
@@ -280,40 +269,19 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err
 		return nil, err
 	}
 
-	hdrRd := bytes.NewReader(buf)
-
 	entries = make([]restic.Blob, 0, uint(len(buf))/entrySize)
 
 	pos := uint(0)
-	for {
-		e := headerEntry{}
-		err = binary.Read(hdrRd, binary.LittleEndian, &e)
-		if errors.Cause(err) == io.EOF {
-			break
-		}
-
+	for len(buf) > 0 {
+		entry, err := parseHeaderEntry(buf)
 		if err != nil {
-			return nil, errors.Wrap(err, "binary.Read")
+			return nil, err
 		}
+		entry.Offset = pos
 
-		entry := restic.Blob{
-			Length: uint(e.Length),
-			ID:     e.ID,
-			Offset: pos,
-		}
-
-		switch e.Type {
-		case 0:
-			entry.Type = restic.DataBlob
-		case 1:
-			entry.Type = restic.TreeBlob
-		default:
-			return nil, errors.Errorf("invalid type %d", e.Type)
-		}
-
 		entries = append(entries, entry)
-
-		pos += uint(e.Length)
+		pos += entry.Length
+		buf = buf[entrySize:]
 	}
 
 	return entries, nil
@@ -323,3 +291,25 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err
 func PackedSizeOfBlob(blobLength uint) uint {
 	return blobLength + entrySize
 }
+
+func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
+	if uint(len(p)) < entrySize {
+		err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
+		return b, err
+	}
+	p = p[:entrySize]
+
+	switch p[0] {
+	case 0:
+		b.Type = restic.DataBlob
+	case 1:
+		b.Type = restic.TreeBlob
+	default:
+		return b, errors.Errorf("invalid type %d", p[0])
+	}
+
+	b.Length = uint(binary.LittleEndian.Uint32(p[1:5]))
+	copy(b.ID[:], p[5:])
+
+	return b, nil
+}
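
makeHeader and parseHeaderEntry agree on a fixed wire format per header entry: one type byte, a little-endian uint32 length, then the blob ID. Assuming restic.BlobType is a uint8 and restic.ID a 32-byte array, as elsewhere in restic, entrySize works out to 37 bytes. The following round-trip sketch is not part of this commit and uses plain types; encodeEntry and decodeEntry are illustrative names mirroring makeHeader and parseHeaderEntry (minus the blob-type mapping).

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// entrySize matches the layout used above:
// 1 type byte + 4-byte little-endian length + 32-byte ID = 37 bytes.
const entrySize = 1 + 4 + 32

type entry struct {
	Type   uint8
	Length uint32
	ID     [32]byte
}

// encodeEntry appends the fixed-size wire form of e to buf, as makeHeader does.
func encodeEntry(buf []byte, e entry) []byte {
	buf = append(buf, e.Type)
	var lenLE [4]byte
	binary.LittleEndian.PutUint32(lenLE[:], e.Length)
	buf = append(buf, lenLE[:]...)
	return append(buf, e.ID[:]...)
}

// decodeEntry parses one entry from the front of p.
func decodeEntry(p []byte) (entry, error) {
	var e entry
	if len(p) < entrySize {
		return e, errors.New("buffer too short")
	}
	e.Type = p[0]
	e.Length = binary.LittleEndian.Uint32(p[1:5])
	copy(e.ID[:], p[5:entrySize])
	return e, nil
}

func main() {
	in := entry{Type: 1, Length: 100}
	in.ID[0] = 0xaa

	buf := encodeEntry(nil, in)
	out, err := decodeEntry(buf)
	fmt.Println(len(buf), out == in, err) // 37 true <nil>
}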

View File

@@ -7,9 +7,44 @@ import (
 	"testing"
 
 	"github.com/restic/restic/internal/crypto"
+	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
 )
 
+func TestParseHeaderEntry(t *testing.T) {
+	h := headerEntry{
+		Type:   0, // Blob.
+		Length: 100,
+	}
+	for i := range h.ID {
+		h.ID[i] = byte(i)
+	}
+
+	buf := new(bytes.Buffer)
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err := parseHeaderEntry(buf.Bytes())
+	rtest.OK(t, err)
+	rtest.Equals(t, restic.DataBlob, b.Type)
+	t.Logf("%v %v", h.ID, b.ID)
+	rtest.Assert(t, bytes.Equal(h.ID[:], b.ID[:]), "id mismatch")
+	rtest.Equals(t, uint(h.Length), b.Length)
+
+	h.Type = 0xae
+	buf.Reset()
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err = parseHeaderEntry(buf.Bytes())
+	rtest.Assert(t, err != nil, "no error for invalid type")
+
+	h.Type = 0
+	buf.Reset()
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err = parseHeaderEntry(buf.Bytes()[:entrySize-1])
+	rtest.Assert(t, err != nil, "no error for short input")
+}
+
 type countingReaderAt struct {
 	delegate        io.ReaderAt
 	invocationCount int

View File

@@ -36,7 +36,8 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
 	}
 
 	// pack blobs
-	p := pack.NewPacker(k, new(bytes.Buffer))
+	var buf bytes.Buffer
+	p := pack.NewPacker(k, &buf)
 	for _, b := range bufs {
 		p.Add(restic.TreeBlob, b.id, b.data)
 	}
@@ -44,8 +45,7 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
 	_, err := p.Finalize()
 	rtest.OK(t, err)
 
-	packData := p.Writer().(*bytes.Buffer).Bytes()
-	return bufs, packData, p.Size()
+	return bufs, buf.Bytes(), p.Size()
 }
 
 func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) {
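
With Packer.Writer() removed, the test owns the bytes.Buffer it hands to NewPacker and reads the pack bytes back from it after Finalize. The same caller-owned-buffer pattern in isolation, as a sketch rather than restic code; writePack is a hypothetical stand-in for the packer.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writePack stands in for the Packer here: it only needs an io.Writer.
func writePack(w io.Writer) error {
	_, err := w.Write([]byte("pack contents"))
	return err
}

func main() {
	// The caller owns the buffer, so it can read the written bytes back
	// directly instead of going through an accessor on the writer's owner.
	var buf bytes.Buffer
	if err := writePack(&buf); err != nil {
		panic(err)
	}
	fmt.Println(len(buf.Bytes())) // 13
}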

View File

@@ -57,7 +57,10 @@ func (r *packerManager) findPacker() (packer *Packer, err error) {
 	// search for a suitable packer
 	if len(r.packers) > 0 {
 		p := r.packers[0]
-		r.packers = r.packers[1:]
+		last := len(r.packers) - 1
+		r.packers[0] = r.packers[last]
+		r.packers[last] = nil // Allow GC of stale reference.
+		r.packers = r.packers[:last]
 		return p, nil
 	}
 
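
findPacker now removes the first packer by moving the last element into slot 0 and shrinking the slice, clearing the vacated slot so the backing array no longer pins the moved *Packer. A generic sketch of that swap-remove pattern, not part of this commit; takeFirst is a hypothetical helper that assumes a non-empty slice, as the surrounding length check guarantees.

package main

import "fmt"

type Packer struct{ id int }

// takeFirst removes and returns packers[0] in O(1). It moves the last element
// into slot 0 instead of shifting, and clears the vacated slot so the backing
// array does not keep the moved pointer reachable. It assumes len(packers) > 0.
func takeFirst(packers []*Packer) (*Packer, []*Packer) {
	p := packers[0]
	last := len(packers) - 1
	packers[0] = packers[last]
	packers[last] = nil // allow GC of the stale reference
	return p, packers[:last]
}

func main() {
	packers := []*Packer{{id: 1}, {id: 2}, {id: 3}}

	p, packers := takeFirst(packers)
	fmt.Println(p.id, len(packers)) // 1 2
}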