From d42ff509ba4f18b7ebf6e022d1076be87aee5b28 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 8 Aug 2015 13:47:08 +0200
Subject: [PATCH] Small refactorings

 * use uint instead of uint32 in packs/indexes
 * use ID.Str() for debug messages
 * add ParallelIDWorkFunc
---
 pack/pack.go           |  8 ++++----
 repository/index.go    |  8 ++++----
 repository/parallel.go | 17 +++++++++++++++++
 3 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/pack/pack.go b/pack/pack.go
index 481bd9ecb..727566dcf 100644
--- a/pack/pack.go
+++ b/pack/pack.go
@@ -57,7 +57,7 @@ func (t *BlobType) UnmarshalJSON(buf []byte) error {
 // Blob is a blob within a pack.
 type Blob struct {
 	Type   BlobType
-	Length uint32
+	Length uint
 	ID     backend.ID
 	Offset uint
 }
@@ -100,7 +100,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
 	c := Blob{Type: t, ID: id}
 
 	n, err := io.Copy(p.hw, rd)
-	c.Length = uint32(n)
+	c.Length = uint(n)
 	c.Offset = p.bytes
 	p.bytes += uint(n)
 	p.blobs = append(p.blobs, c)
@@ -164,7 +164,7 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
 	for _, b := range p.blobs {
 		entry := headerEntry{
 			Type:   b.Type,
-			Length: b.Length,
+			Length: uint32(b.Length),
 			ID:     b.ID,
 		}
 
@@ -276,7 +276,7 @@ func NewUnpacker(k *crypto.Key, entries []Blob, rd io.ReadSeeker) (*Unpacker, er
 
 		entries = append(entries, Blob{
 			Type:   e.Type,
-			Length: e.Length,
+			Length: uint(e.Length),
 			ID:     e.ID,
 			Offset: pos,
 		})
diff --git a/repository/index.go b/repository/index.go
index d4ece1a0d..b53de02dd 100644
--- a/repository/index.go
+++ b/repository/index.go
@@ -109,7 +109,7 @@ func (idx *Index) Merge(other *Index) {
 
 	for k, v := range other.pack {
 		if _, ok := idx.pack[k]; ok {
-			debug.Log("Index.Merge", "index already has key %v, updating", k[:8])
+			debug.Log("Index.Merge", "index already has key %v, updating", k.Str())
 		}
 
 		idx.pack[k] = v
@@ -146,7 +146,7 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
 					ID:     id,
 					Offset: blob.offset,
 					Type:   blob.tpe,
-					Length: uint32(blob.length),
+					Length: blob.length,
 				},
 				PackID: *blob.packID,
 			}:
@@ -166,7 +166,7 @@ func (idx *Index) Count(t pack.BlobType) (n uint) {
 	for id, blob := range idx.pack {
 		if blob.tpe == t {
 			n++
-			debug.Log("Index.Count", " blob %v counted: %v", id[:8], blob)
+			debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob)
 		}
 	}
 
@@ -201,7 +201,7 @@ func (idx *Index) generatePackList(selectFn func(indexEntry) bool) ([]*packJSON,
 
 		if blob.packID.IsNull() {
 			debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)",
-				id[:8], blob.tpe, blob.offset, blob.length)
+				id.Str(), blob.tpe, blob.offset, blob.length)
 			return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id)
 		}
 
diff --git a/repository/parallel.go b/repository/parallel.go
index 44f75ccbe..19ba567c5 100644
--- a/repository/parallel.go
+++ b/repository/parallel.go
@@ -20,6 +20,10 @@ func closeIfOpen(ch chan struct{}) {
 // processing stops. If done is closed, the function should return.
 type ParallelWorkFunc func(id string, done <-chan struct{}) error
 
+// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned,
+// processing stops. If done is closed, the function should return.
+type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error
+
 // FilesInParallel runs n workers of f in parallel, on the IDs that
 // repo.List(t) yield. If f returns an error, the process is aborted and the
 // first error is returned.
@@ -69,3 +73,16 @@ func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWork
 
 	return nil
 }
+
+// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
+// function that takes a string.
+func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
+	return func(s string, done <-chan struct{}) error {
+		id, err := backend.ParseID(s)
+		if err != nil {
+			return err
+		}
+
+		return f(id, done)
+	}
+}