Simplify pack.List

Alexander Neumann 2016-08-25 21:51:07 +02:00
parent 3fd1e4a992
commit de88fb2022
7 changed files with 71 additions and 106 deletions


@@ -126,14 +126,18 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
 		name := job.Data.(string)
 		h := backend.Handle{Type: backend.Data, Name: name}

-		ldr := pack.BackendLoader{Backend: repo.Backend(), Handle: h}
-
-		unpacker, err := pack.NewUnpacker(repo.Key(), ldr)
+		blobInfo, err := repo.Backend().Stat(h)
 		if err != nil {
 			return nil, err
 		}

-		return unpacker.Entries, nil
+		blobs, err := pack.List(repo.Key(), backend.ReaderAt(repo.Backend(), h), blobInfo.Size)
+		if err != nil {
+			return nil, err
+		}
+
+		return blobs, nil
 	}

 	jobCh := make(chan worker.Job)
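
Note on the new calling convention: pack.List now takes an io.ReaderAt and the total pack size instead of a pack.Loader, so a caller first stats the pack file and then wraps the backend with backend.ReaderAt. The sketch below only restates the pattern from the hunk above; the "restic/..." import paths and the exact shapes of Stat and backend.ReaderAt are assumed from the calls in this commit, not verified against the full tree.

// listPackBlobs sketches the new calling convention: stat the pack to learn
// its size, wrap the backend in an io.ReaderAt, and pass both to pack.List.
package example

import (
	"restic/backend"
	"restic/crypto"
	"restic/pack"
)

func listPackBlobs(be backend.Backend, key *crypto.Key, name string) ([]pack.Blob, error) {
	h := backend.Handle{Type: backend.Data, Name: name}

	// Stat supplies the size that List needs to locate the header at the
	// end of the pack file.
	blobInfo, err := be.Stat(h)
	if err != nil {
		return nil, err
	}

	return pack.List(key, backend.ReaderAt(be, h), blobInfo.Size)
}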


@@ -1,6 +1,7 @@
 package checker

 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"sync"
@@ -676,7 +677,7 @@ func checkPack(r *repository.Repository, id backend.ID) error {
 		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
 	}

-	blobs, err := pack.List(r.Key(), pack.BufferLoader(buf))
+	blobs, err := pack.List(r.Key(), bytes.NewReader(buf), int64(len(buf)))
 	if err != nil {
 		return err
 	}


@@ -1,43 +0,0 @@
-package pack
-
-import (
-	"errors"
-
-	"restic/backend"
-)
-
-// Loader loads data from somewhere at a given offset. In contrast to
-// io.ReaderAt, off may be negative, in which case it references a position
-// relative to the end of the file (similar to Seek()).
-type Loader interface {
-	Load(p []byte, off int64) (int, error)
-}
-
-// BackendLoader creates a Loader from a Backend and a Handle.
-type BackendLoader struct {
-	Backend backend.Backend
-	Handle  backend.Handle
-}
-
-// Load returns data at the given offset.
-func (l BackendLoader) Load(p []byte, off int64) (int, error) {
-	return l.Backend.Load(l.Handle, p, off)
-}
-
-// BufferLoader allows using a buffer as a Loader.
-type BufferLoader []byte
-
-// Load returns data at the given offset.
-func (b BufferLoader) Load(p []byte, off int64) (int, error) {
-	switch {
-	case off > int64(len(b)):
-		return 0, errors.New("offset is larger than data")
-	case off < -int64(len(b)):
-		off = 0
-	case off < 0:
-		off = int64(len(b)) + off
-	}
-
-	b = b[off:]
-	return copy(p, b), nil
-}


@@ -228,67 +228,73 @@ func (p *Packer) String() string {
 	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
 }

-const (
-	preloadHeaderSize = 2048
-	maxHeaderSize     = 16 * 1024 * 1024
-)
+// readHeaderLength returns the header length read from the end of the file
+// encoded in little endian.
+func readHeaderLength(rd io.ReaderAt, size int64) (uint32, error) {
+	off := size - int64(binary.Size(uint32(0)))
+
+	buf := make([]byte, binary.Size(uint32(0)))
+	n, err := rd.ReadAt(buf, off)
+	if err != nil {
+		return 0, err
+	}
+
+	if n != len(buf) {
+		return 0, errors.New("not enough bytes read")
+	}
+
+	return binary.LittleEndian.Uint32(buf), nil
+}
+
+const maxHeaderSize = 16 * 1024 * 1024
+
+// readHeader reads the header at the end of rd. size is the length of the
+// whole data accessible in rd.
+func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
+	hl, err := readHeaderLength(rd, size)
+	if err != nil {
+		return nil, err
+	}
+
+	if int64(hl) > size-int64(binary.Size(hl)) {
+		return nil, errors.New("header is larger than file")
+	}
+
+	if int64(hl) > maxHeaderSize {
+		return nil, errors.New("header is larger than maxHeaderSize")
+	}
+
+	buf := make([]byte, int(hl))
+	n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl)))
+	if err != nil {
+		return nil, err
+	}
+
+	if n != len(buf) {
+		return nil, errors.New("not enough bytes read")
+	}
+
+	return buf, nil
+}

 // List returns the list of entries found in a pack file.
-func List(k *crypto.Key, ldr Loader) (entries []Blob, err error) {
-	// read the last 2048 byte, this will mostly be enough for the header, so
-	// we do not need another round trip.
-	buf := make([]byte, preloadHeaderSize)
-	n, err := ldr.Load(buf, -int64(len(buf)))
-	if err == io.ErrUnexpectedEOF {
-		err = nil
-		buf = buf[:n]
-	}
-
+func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) {
+	buf, err := readHeader(rd, size)
 	if err != nil {
-		return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
+		return nil, err
 	}

-	buf = buf[:n]
-
-	bs := binary.Size(uint32(0))
-	p := len(buf) - bs
-
-	// read the length from the end of the buffer
-	length := int(binary.LittleEndian.Uint32(buf[p : p+bs]))
-	buf = buf[:p]
-
-	if length > maxHeaderSize {
-		return nil, fmt.Errorf("header too large (%d bytes)", length)
-	}
-
-	// if the header is longer than the preloaded buffer, call the loader again.
-	if length > len(buf) {
-		buf = make([]byte, length)
-		n, err := ldr.Load(buf, -int64(len(buf)+bs))
-		if err != nil {
-			return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
-		}
-
-		if n != len(buf) {
-			return nil, fmt.Errorf("not enough header bytes read: wanted %v, got %v", len(buf), n)
-		}
-	}
-
-	buf = buf[len(buf)-length:]
-
-	// read header
 	hdr, err := crypto.Decrypt(k, buf, buf)
 	if err != nil {
 		return nil, err
 	}

-	rd := bytes.NewReader(hdr)
+	hdrRd := bytes.NewReader(hdr)

 	pos := uint(0)
 	for {
 		e := headerEntry{}
-		err = binary.Read(rd, binary.LittleEndian, &e)
+		err = binary.Read(hdrRd, binary.LittleEndian, &e)
 		if err == io.EOF {
 			break
 		}
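
The restructuring above replaces the old heuristic of preloading the last 2048 bytes (and re-reading when the header turned out to be larger) with two exact reads: readHeaderLength fetches the 4-byte little-endian length stored at the end of the pack, and readHeader then reads exactly that many bytes. Since any io.ReaderAt works, callers that already hold the whole pack in memory can pass a bytes.Reader, as the checker change above and the Repack change below do. A minimal usage sketch of that in-memory case; the pack.Blob field names are assumed from how the tests in this commit read entries back:

// printPackEntries lists the blobs recorded in the header of a pack that is
// already held in memory. bytes.Reader implements io.ReaderAt, and
// len(buf) provides the size argument that List now requires.
package example

import (
	"bytes"
	"fmt"

	"restic/crypto"
	"restic/pack"
)

func printPackEntries(key *crypto.Key, buf []byte) error {
	entries, err := pack.List(key, bytes.NewReader(buf), int64(len(buf)))
	if err != nil {
		return err
	}

	for _, e := range entries {
		// Offset and Length appear in the tests; Type and ID are assumed
		// to be the remaining fields of pack.Blob.
		fmt.Printf("blob %v (%v): offset %d, length %d\n", e.ID, e.Type, e.Offset, e.Length)
	}

	return nil
}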


@@ -47,7 +47,7 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
 	return bufs, packData, p.Size()
 }

-func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packSize uint) {
+func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) {
 	written := 0
 	for _, buf := range bufs {
 		written += len(buf.data)
@@ -63,7 +63,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packS
 	Equals(t, uint(written), packSize)

 	// read and parse it again
-	entries, err := pack.List(k, ldr)
+	entries, err := pack.List(k, rd, int64(packSize))
 	OK(t, err)

 	Equals(t, len(entries), len(bufs))
@@ -76,7 +76,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packS
 			buf = make([]byte, int(e.Length))
 		}

 		buf = buf[:int(e.Length)]
-		n, err := ldr.Load(buf, int64(e.Offset))
+		n, err := rd.ReadAt(buf, int64(e.Offset))
 		OK(t, err)

 		buf = buf[:n]
@@ -91,7 +91,7 @@ func TestCreatePack(t *testing.T) {
 	bufs, packData, packSize := newPack(t, k, testLens)
 	Equals(t, uint(len(packData)), packSize)

-	verifyBlobs(t, bufs, k, pack.BufferLoader(packData), packSize)
+	verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize)
 }

 var blobTypeJSON = []struct {
@@ -128,8 +128,7 @@ func TestUnpackReadSeeker(t *testing.T) {
 	handle := backend.Handle{Type: backend.Data, Name: id.String()}
 	OK(t, b.Save(handle, packData))

-	ldr := pack.BackendLoader{Backend: b, Handle: handle}
-	verifyBlobs(t, bufs, k, ldr, packSize)
+	verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
 }

 func TestShortPack(t *testing.T) {
@@ -142,6 +141,5 @@ func TestShortPack(t *testing.T) {
 	handle := backend.Handle{Type: backend.Data, Name: id.String()}
 	OK(t, b.Save(handle, packData))

-	ldr := pack.BackendLoader{Backend: b, Handle: handle}
-	verifyBlobs(t, bufs, k, ldr, packSize)
+	verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
 }


@@ -1,6 +1,7 @@
 package repository

 import (
+	"bytes"
 	"io"
 	"restic/backend"
 	"restic/crypto"
@@ -32,7 +33,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err
 		debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf))

-		blobs, err := pack.List(repo.Key(), pack.BufferLoader(buf))
+		blobs, err := pack.List(repo.Key(), bytes.NewReader(buf), int64(len(buf)))
 		if err != nil {
 			return err
 		}


@@ -554,9 +554,7 @@ func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) {
 		return nil, 0, err
 	}

-	ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h}
-
-	blobs, err := pack.List(r.Key(), ldr)
+	blobs, err := pack.List(r.Key(), backend.ReaderAt(r.Backend(), h), blobInfo.Size)
 	if err != nil {
 		return nil, 0, err
 	}