Switch s3 library to allow for s3 compatible backends. Fixes #315

This commit is contained in:
Chris Howey 2015-11-06 15:31:59 -06:00 committed by Alexander Neumann
parent c969de7fad
commit 6d1552af51
68 changed files with 5994 additions and 4655 deletions

13
Godeps/Godeps.json generated
View File

@ -22,6 +22,11 @@
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/minio/minio-go",
"Comment": "v0.2.5-58-g5c3a491",
"Rev": "5c3a4919116141f088990bd6ee385877648c7a25"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "518aed2757a65cfa64d4b1b2baf08410f8b7a6bc"
@ -49,14 +54,6 @@
{
"ImportPath": "golang.org/x/net/context",
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
},
{
"ImportPath": "gopkg.in/amz.v3/aws",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
},
{
"ImportPath": "gopkg.in/amz.v3/s3",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
}
]
}

View File

@ -0,0 +1,2 @@
*~
*.test

View File

@ -0,0 +1,9 @@
language: go
go:
- 1.5.1
script:
- go vet ./...
- go test -race -v ./...
notifications:
slack:
secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8=

View File

@ -0,0 +1,21 @@
### Developer Guidelines
``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
- Fork it
- Create your feature branch (git checkout -b my-new-feature)
- Commit your changes (git commit -am 'Add some feature')
- Push to the branch (git push origin my-new-feature)
- Create new Pull Request
* When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt`
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes.
* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
- `minio-go` project is strictly conformant with Golang style
- if you happen to observe offending code, please feel free to send a pull request

202
Godeps/_workspace/src/github.com/minio/minio-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,71 @@
# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Install
```sh
$ go get github.com/minio/minio-go
```
## Example
```go
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
config := minio.Config{
AccessKeyID: "YOUR-ACCESS-KEY-HERE",
SecretAccessKey: "YOUR-PASSWORD-HERE",
Endpoint: "https://s3.amazonaws.com",
}
s3Client, err := minio.New(config)
if err != nil {
log.Fatalln(err)
}
for bucket := range s3Client.ListBuckets() {
if bucket.Err != nil {
log.Fatalln(bucket.Err)
}
log.Println(bucket.Stat)
}
}
```
## Documentation
### Bucket Level
* [MakeBucket(bucket, acl) error](examples/s3/makebucket.go)
* [BucketExists(bucket) error](examples/s3/bucketexists.go)
* [RemoveBucket(bucket) error](examples/s3/removebucket.go)
* [GetBucketACL(bucket) (BucketACL, error)](examples/s3/getbucketacl.go)
* [SetBucketACL(bucket, BucketACL) error](examples/s3/setbucketacl.go)
* [ListBuckets() <-chan BucketStat](examples/s3/listbuckets.go)
* [ListObjects(bucket, prefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucket, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
### Object Level
* [PutObject(bucket, object, size, io.Reader) error](examples/s3/putobject.go)
* [GetObject(bucket, object) (io.Reader, ObjectStat, error)](examples/s3/getobject.go)
* [GetPartialObject(bucket, object, offset, length) (io.Reader, ObjectStat, error)](examples/s3/getpartialobject.go)
* [StatObject(bucket, object) (ObjectStat, error)](examples/s3/statobject.go)
* [RemoveObject(bucket, object) error](examples/s3/removeobject.go)
* [RemoveIncompleteUpload(bucket, object) <-chan error](examples/s3/removeincompleteupload.go)
### Presigned Bucket/Object Level
* [PresignedGetObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
* [PresignedPutObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
### API Reference
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)
## Contribute
[Contributors Guide](./CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)

View File

@ -0,0 +1,885 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
)
const (
separator = "/"
)
// apiCore container to hold unexported internal functions
type apiCore struct {
config *Config
}
// closeResp close non nil response with any response Body
func closeResp(resp *http.Response) {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
}
// putBucketRequest wrapper creates a new putBucket request
//
// When a location constraint is given it is marshalled into the request
// body in the configured AcceptType wire format (XML by default); the
// ACL header defaults to "private" when acl is empty.
func (a apiCore) putBucketRequest(bucket, acl, location string) (*request, error) {
	var r *request
	var err error
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket,
	}
	var createBucketConfigBuffer *bytes.Reader
	// If location is set use it and create proper bucket configuration
	switch {
	case location != "":
		createBucketConfig := new(createBucketConfiguration)
		createBucketConfig.Location = location
		var createBucketConfigBytes []byte
		// Marshal the location constraint in the negotiated wire format.
		switch {
		case a.config.AcceptType == "application/xml":
			createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
		case a.config.AcceptType == "application/json":
			createBucketConfigBytes, err = json.Marshal(createBucketConfig)
		default:
			// Unrecognized accept types fall back to XML, S3's native format.
			createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
		}
		if err != nil {
			return nil, err
		}
		createBucketConfigBuffer = bytes.NewReader(createBucketConfigBytes)
	}
	switch {
	case createBucketConfigBuffer == nil:
		// No location constraint: body-less PUT bucket request.
		r, err = newRequest(op, a.config, nil)
		if err != nil {
			return nil, err
		}
	default:
		r, err = newRequest(op, a.config, createBucketConfigBuffer)
		if err != nil {
			return nil, err
		}
		// Set Content-Length explicitly from the marshalled body.
		r.req.ContentLength = int64(createBucketConfigBuffer.Len())
	}
	// by default bucket is private
	switch {
	case acl != "":
		r.Set("x-amz-acl", acl)
	default:
		r.Set("x-amz-acl", "private")
	}
	return r, nil
}
/// Bucket Write Operations

// putBucket creates a new bucket.
//
// Requires a valid AWS Access Key ID to authenticate requests;
// anonymous requests are never allowed to create buckets.
//
// Both acl and location are optional: an empty acl means ``private``
// and an empty location means US Standard.
//
// ACL valid values
// ------------------
// private - owner gets full access [DEFAULT]
// public-read - owner gets full access, others get read access
// public-read-write - owner gets full access, others get full access too
// authenticated-read - owner gets full access, authenticated users get read access
// ------------------
//
// Location valid values
// ------------------
// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]
//
// Default - US standard
func (a apiCore) putBucket(bucket, acl, location string) error {
	req, err := a.putBucketRequest(bucket, acl, location)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return err
	}
	// Anything other than 200 OK is reported as a decoded server error.
	if resp != nil && resp.StatusCode != http.StatusOK {
		return BodyToErrorResponse(resp.Body, a.config.AcceptType)
	}
	return nil
}
// putBucketACLRequest wrapper creates a new putBucketACL request
// against the bucket's "?acl" subresource.
func (a apiCore) putBucketACLRequest(bucket, acl string) (*request, error) {
	r, err := newRequest(&operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket + "?acl",
	}, a.config, nil)
	if err != nil {
		return nil, err
	}
	// The canned ACL travels as a request header.
	r.Set("x-amz-acl", acl)
	return r, nil
}
// putBucketACL set the permissions on an existing bucket using Canned ACL's
//
// Issues PUT /{bucket}?acl with the ACL carried in the x-amz-acl
// header; any non-200 response is decoded into an error.
func (a apiCore) putBucketACL(bucket, acl string) error {
	req, err := a.putBucketACLRequest(bucket, acl)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	// Always release the response body, even on error paths.
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	return nil
}
// getBucketACLRequest wrapper creates a new getBucketACL request
// using the "?acl" subresource on the bucket path.
func (a apiCore) getBucketACLRequest(bucket string) (*request, error) {
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + "?acl",
	}
	req, err := newRequest(op, a.config, nil)
	if err != nil {
		return nil, err
	}
	return req, nil
}
// getBucketACL get the acl information on an existing bucket
//
// The response body is decoded into an accessControlPolicy according to
// the configured AcceptType. An empty Grant list is treated as an
// internal error and reported with a link to the issue tracker.
func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) {
	req, err := a.getBucketACLRequest(bucket)
	if err != nil {
		return accessControlPolicy{}, err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return accessControlPolicy{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return accessControlPolicy{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	policy := accessControlPolicy{}
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &policy)
	if err != nil {
		return accessControlPolicy{}, err
	}
	// A missing grant list indicates a malformed server response; ask
	// users to report it upstream rather than returning partial data.
	if policy.AccessControlList.Grant == nil {
		errorResponse := ErrorResponse{
			Code:      "InternalError",
			Message:   "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues",
			Resource:  separator + bucket,
			RequestID: resp.Header.Get("x-amz-request-id"),
			HostID:    resp.Header.Get("x-amz-id-2"),
		}
		return accessControlPolicy{}, errorResponse
	}
	return policy, nil
}
// getBucketLocationRequest wrapper creates a new getBucketLocation
// request using the bucket's "?location" subresource.
func (a apiCore) getBucketLocationRequest(bucket string) (*request, error) {
	return newRequest(&operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + "?location",
	}, a.config, nil)
}
// getBucketLocation uses location subresource to return a bucket's region
//
// The decoded LocationConstraint string is returned as-is.
// NOTE(review): presumably an empty constraint denotes the default
// US Standard region — confirm against the server's behavior.
func (a apiCore) getBucketLocation(bucket string) (string, error) {
	req, err := a.getBucketLocationRequest(bucket)
	if err != nil {
		return "", err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return "", err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return "", BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	var locationConstraint string
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &locationConstraint)
	if err != nil {
		return "", err
	}
	return locationConstraint, nil
}
// listObjectsRequest wrapper creates a new listObjects request
//
// marker, prefix and delimiter are appended to the query string only
// when non-empty. The previous fallthrough-based switch appended empty
// "&prefix=" / "&delimiter=" parameters whenever an earlier parameter
// was set; S3 treats empty parameters the same as absent ones, so the
// conditional form is equivalent and produces a cleaner request. The
// always-nil error of the old resourceQuery closure is dropped too.
func (a apiCore) listObjectsRequest(bucket, marker, prefix, delimiter string, maxkeys int) (*request, error) {
	// Build the query with resources properly escaped before using them
	// in the HTTP request. max-keys is always present.
	query := fmt.Sprintf("?max-keys=%d", maxkeys)
	if marker != "" {
		query += fmt.Sprintf("&marker=%s", getURLEncodedPath(marker))
	}
	if prefix != "" {
		query += fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix))
	}
	if delimiter != "" {
		// NOTE(review): delimiter is deliberately left unescaped, matching
		// the original behavior — confirm callers only pass URL-safe values.
		query += fmt.Sprintf("&delimiter=%s", delimiter)
	}
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + query,
	}
	return newRequest(op, a.config, nil)
}
/// Bucket Read Operations

// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a
// subset of the objects in a bucket. Supported request parameters:
//   ?marker    - Specifies the key to start with when listing objects in a bucket.
//   ?delimiter - A delimiter is a character you use to group keys.
//   ?prefix    - Limits the response to keys that begin with the specified prefix.
//   ?max-keys  - Sets the maximum number of keys returned in the response body.
func (a apiCore) listObjects(bucket, marker, prefix, delimiter string, maxkeys int) (listBucketResult, error) {
	if err := invalidBucketError(bucket); err != nil {
		return listBucketResult{}, err
	}
	req, err := a.listObjectsRequest(bucket, marker, prefix, delimiter, maxkeys)
	if err != nil {
		return listBucketResult{}, err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return listBucketResult{}, err
	}
	if resp != nil && resp.StatusCode != http.StatusOK {
		return listBucketResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
	}
	// Decode the listing; the local is named "result" so it does not
	// shadow the listBucketResult type.
	var result listBucketResult
	if err := acceptTypeDecoder(resp.Body, a.config.AcceptType, &result); err != nil {
		return result, err
	}
	// close body while returning, along with any error
	return result, nil
}
// headBucketRequest wrapper creates a new headBucket request.
func (a apiCore) headBucketRequest(bucket string) (*request, error) {
	return newRequest(&operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "HEAD",
		HTTPPath:   separator + bucket,
	}, a.config, nil)
}
// headBucket useful to determine if a bucket exists and you have permission to access it.
//
// HEAD responses carry no body, so errors are synthesized locally from
// the HTTP status code instead of being decoded from the response.
func (a apiCore) headBucket(bucket string) error {
	if err := invalidBucketError(bucket); err != nil {
		return err
	}
	req, err := a.headBucketRequest(bucket)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			// Head has no response body, handle it
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				errorResponse = ErrorResponse{
					Code:      "NoSuchBucket",
					Message:   "The specified bucket does not exist.",
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			case http.StatusForbidden:
				errorResponse = ErrorResponse{
					Code:      "AccessDenied",
					Message:   "Access Denied",
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			default:
				// Unexpected status codes are reported verbatim.
				errorResponse = ErrorResponse{
					Code:      resp.Status,
					Message:   resp.Status,
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			}
			return errorResponse
		}
	}
	return nil
}
// deleteBucketRequest wrapper creates a new deleteBucket request.
func (a apiCore) deleteBucketRequest(bucket string) (*request, error) {
	op := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "DELETE",
		HTTPPath:   separator + bucket,
	}
	return newRequest(&op, a.config, nil)
}
// deleteBucket deletes the bucket named in the URI
//
// NOTE: -
// All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request
//
// A successful delete answers 204 No Content; any other status is
// mapped to an ErrorResponse synthesized from the status code.
func (a apiCore) deleteBucket(bucket string) error {
	if err := invalidBucketError(bucket); err != nil {
		return err
	}
	req, err := a.deleteBucketRequest(bucket)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusNoContent {
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				errorResponse = ErrorResponse{
					Code:      "NoSuchBucket",
					Message:   "The specified bucket does not exist.",
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			case http.StatusForbidden:
				errorResponse = ErrorResponse{
					Code:      "AccessDenied",
					Message:   "Access Denied",
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			case http.StatusConflict:
				// Deleting a non-empty bucket is rejected by the server.
				errorResponse = ErrorResponse{
					Code:      "Conflict",
					Message:   "Bucket not empty",
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			default:
				// Unexpected status codes are reported verbatim.
				errorResponse = ErrorResponse{
					Code:      resp.Status,
					Message:   resp.Status,
					Resource:  separator + bucket,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			}
			return errorResponse
		}
	}
	return nil
}
/// Object Read/Write/Stat Operations

// putObjectUnAuthenticatedRequest creates an unauthenticated PUT object
// request. An empty contentType defaults to "application/octet-stream".
func (a apiCore) putObjectUnAuthenticatedRequest(bucket, object, contentType string, size int64, body io.Reader) (*request, error) {
	if strings.TrimSpace(contentType) == "" {
		contentType = "application/octet-stream"
	}
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket + separator + object,
	}
	r, err := newUnauthenticatedRequest(op, a.config, body)
	if err != nil {
		return nil, err
	}
	// Content-MD5 is not set consciously
	r.Set("Content-Type", contentType)
	r.req.ContentLength = size
	return r, nil
}
// putObjectUnAuthenticated - add an object to a bucket
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
//
// Returns an ObjectStat holding the server-reported ETag on success.
func (a apiCore) putObjectUnAuthenticated(bucket, object, contentType string, size int64, body io.Reader) (ObjectStat, error) {
	req, err := a.putObjectUnAuthenticatedRequest(bucket, object, contentType, size, body)
	if err != nil {
		return ObjectStat{}, err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return ObjectStat{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	var metadata ObjectStat
	// NOTE(review): resp is dereferenced here without a nil check; this
	// assumes req.Do() never returns (nil, nil) — confirm.
	metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
	return metadata, nil
}
// putObjectRequest wrapper creates a new PutObject request
//
// The MD5 checksum is sent base64-encoded in the Content-MD5 header so
// the server can verify the uploaded payload. An empty contentType
// defaults to "application/octet-stream".
func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (*request, error) {
	if strings.TrimSpace(contentType) == "" {
		contentType = "application/octet-stream"
	}
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket + separator + object,
	}
	r, err := newRequest(op, a.config, body)
	if err != nil {
		return nil, err
	}
	// set Content-MD5 as base64 encoded md5
	r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
	r.Set("Content-Type", contentType)
	r.req.ContentLength = size
	return r, nil
}
// putObject - add an object to a bucket
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
//
// Returns an ObjectStat holding the server-reported ETag on success.
func (a apiCore) putObject(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (ObjectStat, error) {
	req, err := a.putObjectRequest(bucket, object, contentType, md5SumBytes, size, body)
	if err != nil {
		return ObjectStat{}, err
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return ObjectStat{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	var metadata ObjectStat
	// NOTE(review): resp is dereferenced here without a nil check; this
	// assumes req.Do() never returns (nil, nil) — confirm.
	metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
	return metadata, nil
}
// presignedPostPolicy finalizes a POST upload policy and returns the
// form fields a client must submit with the multipart form upload.
//
// The current UTC time is captured once so the signed policy conditions
// and the emitted form fields agree exactly.
func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string {
	t := time.Now().UTC()
	r := new(request)
	r.config = a.config
	credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
	// Mirror every signed value into the policy document itself.
	p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)})
	p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader})
	p.addNewPolicy(policy{"eq", "$x-amz-credential", credential})
	policyBase64 := p.base64()
	p.formData["policy"] = policyBase64
	p.formData["x-amz-algorithm"] = authHeader
	p.formData["x-amz-credential"] = credential
	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
	p.formData["x-amz-signature"] = r.PostPresignSignature(policyBase64, t)
	return p.formData
}
// presignedPutObject returns a presigned URL that allows uploading the
// named object with a plain HTTP PUT until the expiry elapses.
func (a apiCore) presignedPutObject(bucket, object string, expires int64) (string, error) {
	expiresStr := strconv.FormatInt(expires, 10)
	presignReq, err := newPresignedRequest(&operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket + separator + object,
	}, a.config, expiresStr)
	if err != nil {
		return "", err
	}
	return presignReq.PreSignV4()
}
// presignedGetObjectRequest creates a presignable GET request, adding a
// Range header when a byte offset and/or length is requested.
func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offset, length int64) (*request, error) {
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + separator + object,
	}
	r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10))
	if err != nil {
		return nil, err
	}
	// Translate offset/length into an HTTP Range header:
	//   offset>0, length>0 -> bytes=offset-(offset+length-1)
	//   offset>0, length=0 -> bytes=offset-   (to end of object)
	//   offset=0, length>0 -> bytes=-length   (last `length` bytes)
	switch {
	case length > 0 && offset > 0:
		r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	case offset > 0 && length == 0:
		r.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	case length > 0 && offset == 0:
		r.Set("Range", fmt.Sprintf("bytes=-%d", length))
	}
	return r, nil
}
// presignedGetObject returns a presigned download URL for the given byte
// range of an object, valid for `expires` seconds.
func (a apiCore) presignedGetObject(bucket, object string, expires, offset, length int64) (string, error) {
	if err := invalidArgumentError(object); err != nil {
		return "", err
	}
	r, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length)
	if err != nil {
		return "", err
	}
	return r.PreSignV4()
}
// getObjectRequest wrapper creates a new getObject request.
//
// offset and length select the byte range to fetch: length bytes starting at
// offset. length == 0 means "from offset to the end"; both zero fetches the
// whole object.
func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (*request, error) {
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + separator + object,
	}
	r, err := newRequest(op, a.config, nil)
	if err != nil {
		return nil, err
	}
	switch {
	case length > 0 && offset > 0:
		r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	case offset > 0 && length == 0:
		r.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	case length > 0 && offset == 0:
		// BUG FIX: "bytes=-N" is a suffix-range (the LAST N bytes, RFC 7233
		// section 2.1); requesting the first length bytes must be
		// "bytes=0-(length-1)", matching the offset > 0 case above.
		r.Set("Range", fmt.Sprintf("bytes=0-%d", length-1))
	}
	return r, nil
}
// getObject - retrieve object from Object Storage
//
// Additionally this function also takes range arguments to download the specified
// range bytes of an object. Setting offset and length = 0 will download the full object.
//
// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
	if err := invalidArgumentError(object); err != nil {
		return nil, ObjectStat{}, err
	}
	req, err := a.getObjectRequest(bucket, object, offset, length)
	if err != nil {
		return nil, ObjectStat{}, err
	}
	resp, err := req.Do()
	if err != nil {
		return nil, ObjectStat{}, err
	}
	// Both 200 (full object) and 206 (partial content, when a Range header
	// was sent) are success codes; anything else is decoded as an S3 error.
	if resp != nil {
		switch resp.StatusCode {
		case http.StatusOK:
		case http.StatusPartialContent:
		default:
			return nil, ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	// NOTE(review): resp is dereferenced below without a nil guard; this
	// assumes req.Do never returns (nil, nil) — confirm in request.Do.
	md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
	if err != nil {
		return nil, ObjectStat{}, ErrorResponse{
			Code:      "InternalError",
			Message:   "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues",
			RequestID: resp.Header.Get("x-amz-request-id"),
			HostID:    resp.Header.Get("x-amz-id-2"),
		}
	}
	// Default to a generic binary content type when the server sent none.
	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	var objectstat ObjectStat
	objectstat.ETag = md5sum
	objectstat.Key = object
	objectstat.Size = resp.ContentLength
	objectstat.LastModified = date
	objectstat.ContentType = contentType
	// do not close body here, caller will close
	return resp.Body, objectstat, nil
}
// deleteObjectRequest wrapper creates a new deleteObject request.
func (a apiCore) deleteObjectRequest(bucket, object string) (*request, error) {
	deleteOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "DELETE",
		HTTPPath:   separator + bucket + separator + object,
	}
	return newRequest(&deleteOp, a.config, nil)
}
// deleteObject deletes a given object from a bucket.
// A successful delete returns 204 No Content; 404 and 403 are translated
// into structured ErrorResponse values since DELETE carries no error body.
func (a apiCore) deleteObject(bucket, object string) error {
	if err := invalidBucketError(bucket); err != nil {
		return err
	}
	if err := invalidArgumentError(object); err != nil {
		return err
	}
	req, err := a.deleteObjectRequest(bucket, object)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	// closeResp is safe on a nil response and drains/closes the body.
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusNoContent {
			// Map the bare status code to a structured S3-style error.
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				errorResponse = ErrorResponse{
					Code:      "NoSuchKey",
					Message:   "The specified key does not exist.",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			case http.StatusForbidden:
				errorResponse = ErrorResponse{
					Code:      "AccessDenied",
					Message:   "Access Denied",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			default:
				// Fall back to the raw HTTP status text for unexpected codes.
				errorResponse = ErrorResponse{
					Code:      resp.Status,
					Message:   resp.Status,
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			}
			return errorResponse
		}
	}
	return nil
}
// headObjectRequest wrapper creates a new headObject request.
func (a apiCore) headObjectRequest(bucket, object string) (*request, error) {
	headOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "HEAD",
		HTTPPath:   separator + bucket + separator + object,
	}
	return newRequest(&headOp, a.config, nil)
}
// headObject retrieves metadata from an object without returning the object itself.
// HEAD responses have no body, so 404/403 are mapped to structured errors and
// the ObjectStat is assembled entirely from response headers.
func (a apiCore) headObject(bucket, object string) (ObjectStat, error) {
	if err := invalidBucketError(bucket); err != nil {
		return ObjectStat{}, err
	}
	if err := invalidArgumentError(object); err != nil {
		return ObjectStat{}, err
	}
	req, err := a.headObjectRequest(bucket, object)
	if err != nil {
		return ObjectStat{}, err
	}
	resp, err := req.Do()
	// closeResp is safe on a nil response and drains/closes the body.
	defer closeResp(resp)
	if err != nil {
		return ObjectStat{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			// Map the bare status code to a structured S3-style error.
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				errorResponse = ErrorResponse{
					Code:      "NoSuchKey",
					Message:   "The specified key does not exist.",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			case http.StatusForbidden:
				errorResponse = ErrorResponse{
					Code:      "AccessDenied",
					Message:   "Access Denied",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			default:
				errorResponse = ErrorResponse{
					Code:      resp.Status,
					Message:   resp.Status,
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
					HostID:    resp.Header.Get("x-amz-id-2"),
				}
			}
			return ObjectStat{}, errorResponse
		}
	}
	md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
	// Content-Length and Last-Modified are mandatory for building the stat;
	// a malformed header is surfaced as an InternalError.
	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		return ObjectStat{}, ErrorResponse{
			Code:      "InternalError",
			Message:   "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues",
			RequestID: resp.Header.Get("x-amz-request-id"),
			HostID:    resp.Header.Get("x-amz-id-2"),
		}
	}
	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
	if err != nil {
		return ObjectStat{}, ErrorResponse{
			Code:      "InternalError",
			Message:   "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues",
			RequestID: resp.Header.Get("x-amz-request-id"),
			HostID:    resp.Header.Get("x-amz-id-2"),
		}
	}
	// Default to a generic binary content type when the server sent none.
	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	var objectstat ObjectStat
	objectstat.ETag = md5sum
	objectstat.Key = object
	objectstat.Size = size
	objectstat.LastModified = date
	objectstat.ContentType = contentType
	return objectstat, nil
}
/// Service Operations
// listBucketsRequest wrapper creates a new listBuckets request.
func (a apiCore) listBucketsRequest() (*request, error) {
	listOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator,
	}
	return newRequest(&listOp, a.config, nil)
}
// listBuckets list of all buckets owned by the authenticated sender of the request.
func (a apiCore) listBuckets() (listAllMyBucketsResult, error) {
	req, err := a.listBucketsRequest()
	if err != nil {
		return listAllMyBucketsResult{}, err
	}
	resp, err := req.Do()
	// closeResp drains and closes the body after decoding below completes.
	defer closeResp(resp)
	if err != nil {
		return listAllMyBucketsResult{}, err
	}
	if resp != nil {
		// for un-authenticated requests, amazon sends a redirect handle it
		if resp.StatusCode == http.StatusTemporaryRedirect {
			return listAllMyBucketsResult{}, ErrorResponse{
				Code:      "AccessDenied",
				Message:   "Anonymous access is forbidden for this operation",
				RequestID: resp.Header.Get("x-amz-request-id"),
				HostID:    resp.Header.Get("x-amz-id-2"),
			}
		}
		if resp.StatusCode != http.StatusOK {
			return listAllMyBucketsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	// NOTE(review): the local variable shadows the result type name; kept as-is.
	listAllMyBucketsResult := listAllMyBucketsResult{}
	// Decode the body in whichever wire format (XML/JSON) was negotiated.
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listAllMyBucketsResult)
	if err != nil {
		return listAllMyBucketsResult, err
	}
	return listAllMyBucketsResult, nil
}

View File

@ -0,0 +1,329 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"strconv"
)
// listMultipartUploadsRequest wrapper creates a new listMultipartUploads request.
//
// BUG FIX: the original built the query with a switch whose cases all ended in
// fallthrough, so once any earlier parameter matched, every later parameter
// was appended even when empty (e.g. a bare "&upload-id-marker="). Each
// selection criterion is now appended only when it is actually set, keeping
// the original parameter order.
func (a apiCore) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (*request, error) {
	query := fmt.Sprintf("?uploads&max-uploads=%d", maxuploads)
	if keymarker != "" {
		query += fmt.Sprintf("&key-marker=%s", getURLEncodedPath(keymarker))
	}
	if uploadIDMarker != "" {
		query += fmt.Sprintf("&upload-id-marker=%s", uploadIDMarker)
	}
	if prefix != "" {
		query += fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix))
	}
	if delimiter != "" {
		query += fmt.Sprintf("&delimiter=%s", delimiter)
	}
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + query,
	}
	return newRequest(op, a.config, nil)
}
// listMultipartUploads - (List Multipart Uploads) - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (a apiCore) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (listMultipartUploadsResult, error) {
	req, err := a.listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter, maxuploads)
	if err != nil {
		return listMultipartUploadsResult{}, err
	}
	resp, err := req.Do()
	// closeResp drains and closes the body after the decode below completes.
	defer closeResp(resp)
	if err != nil {
		return listMultipartUploadsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return listMultipartUploadsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	// NOTE(review): the local variable shadows the result type name; kept as-is.
	listMultipartUploadsResult := listMultipartUploadsResult{}
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listMultipartUploadsResult)
	if err != nil {
		return listMultipartUploadsResult, err
	}
	// close body while returning, along with any error
	return listMultipartUploadsResult, nil
}
// initiateMultipartRequest wrapper creates a new initiateMultiPart request.
func (a apiCore) initiateMultipartRequest(bucket, object string) (*request, error) {
	initOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "POST",
		HTTPPath:   separator + bucket + separator + object + "?uploads",
	}
	return newRequest(&initOp, a.config, nil)
}
// initiateMultipartUpload initiates a multipart upload and returns an upload ID.
func (a apiCore) initiateMultipartUpload(bucket, object string) (initiateMultipartUploadResult, error) {
	var result initiateMultipartUploadResult
	req, err := a.initiateMultipartRequest(bucket, object)
	if err != nil {
		return result, err
	}
	resp, err := req.Do()
	// closeResp drains and closes the body after decoding completes.
	defer closeResp(resp)
	if err != nil {
		return result, err
	}
	if resp != nil && resp.StatusCode != http.StatusOK {
		return result, BodyToErrorResponse(resp.Body, a.config.AcceptType)
	}
	// Decode the body in whichever wire format (XML/JSON) was negotiated.
	if err := acceptTypeDecoder(resp.Body, a.config.AcceptType, &result); err != nil {
		return result, err
	}
	return result, nil
}
// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request.
func (a apiCore) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*request, error) {
	op := &operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "POST",
		HTTPPath:   separator + bucket + separator + object + "?uploadId=" + uploadID,
	}
	// Serialize the assembled part list in the negotiated wire format;
	// XML is the S3 default for any unrecognized accept type.
	var payload []byte
	var err error
	switch a.config.AcceptType {
	case "application/json":
		payload, err = json.Marshal(complete)
	default:
		payload, err = xml.Marshal(complete)
	}
	if err != nil {
		return nil, err
	}
	body := bytes.NewReader(payload)
	r, err := newRequest(op, a.config, body)
	if err != nil {
		return nil, err
	}
	r.req.ContentLength = int64(body.Len())
	return r, nil
}
// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
func (a apiCore) completeMultipartUpload(bucket, object, uploadID string, c completeMultipartUpload) (completeMultipartUploadResult, error) {
	req, err := a.completeMultipartUploadRequest(bucket, object, uploadID, c)
	if err != nil {
		return completeMultipartUploadResult{}, err
	}
	resp, err := req.Do()
	// closeResp drains and closes the body after decoding below completes.
	defer closeResp(resp)
	if err != nil {
		return completeMultipartUploadResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return completeMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	// NOTE(review): the local variable shadows the result type name; kept as-is.
	completeMultipartUploadResult := completeMultipartUploadResult{}
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &completeMultipartUploadResult)
	if err != nil {
		return completeMultipartUploadResult, err
	}
	return completeMultipartUploadResult, nil
}
// abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request.
func (a apiCore) abortMultipartUploadRequest(bucket, object, uploadID string) (*request, error) {
	abortOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "DELETE",
		HTTPPath:   separator + bucket + separator + object + "?uploadId=" + uploadID,
	}
	return newRequest(&abortOp, a.config, nil)
}
// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted.
// A successful abort returns 204 No Content; 404 and 403 are translated into
// structured ErrorResponse values because the response carries no body.
func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error {
	req, err := a.abortMultipartUploadRequest(bucket, object, uploadID)
	if err != nil {
		return err
	}
	resp, err := req.Do()
	// closeResp is safe on a nil response and drains/closes the body.
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusNoContent {
			// Abort has no response body, handle it
			var errorResponse ErrorResponse
			switch resp.StatusCode {
			case http.StatusNotFound:
				errorResponse = ErrorResponse{
					Code:      "NoSuchUpload",
					Message:   "The specified multipart upload does not exist.",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
				}
			case http.StatusForbidden:
				errorResponse = ErrorResponse{
					Code:      "AccessDenied",
					Message:   "Access Denied",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
				}
			default:
				// NOTE(review): the default branch leaves Message empty,
				// unlike deleteObject which echoes resp.Status — confirm
				// whether this asymmetry is intentional.
				errorResponse = ErrorResponse{
					Code:      resp.Status,
					Message:   "",
					Resource:  separator + bucket + separator + object,
					RequestID: resp.Header.Get("x-amz-request-id"),
				}
			}
			return errorResponse
		}
	}
	return nil
}
// listObjectPartsRequest wrapper creates a new ListObjectParts request.
func (a apiCore) listObjectPartsRequest(bucket, object, uploadID string, partNumberMarker, maxParts int) (*request, error) {
	// Assemble the query string; part-number-marker is optional and is
	// omitted when it is zero.
	query := fmt.Sprintf("?uploadId=%s&max-parts=%d", uploadID, maxParts)
	if partNumberMarker != 0 {
		query += fmt.Sprintf("&part-number-marker=%d", partNumberMarker)
	}
	listOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "GET",
		HTTPPath:   separator + bucket + separator + object + query,
	}
	return newRequest(&listOp, a.config, nil)
}
// listObjectParts (List Parts) - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters :-
// ---------
// ?part-number-marker - Specifies the part after which listing should begin.
func (a apiCore) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
	req, err := a.listObjectPartsRequest(bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		return listObjectPartsResult{}, err
	}
	resp, err := req.Do()
	// closeResp drains and closes the body after decoding below completes.
	defer closeResp(resp)
	if err != nil {
		return listObjectPartsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return listObjectPartsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	// NOTE(review): the local variable shadows the result type name; kept as-is.
	listObjectPartsResult := listObjectPartsResult{}
	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listObjectPartsResult)
	if err != nil {
		return listObjectPartsResult, err
	}
	return listObjectPartsResult, nil
}
// uploadPartRequest wrapper creates a new UploadPart request.
func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (*request, error) {
	uploadOp := operation{
		HTTPServer: a.config.Endpoint,
		HTTPMethod: "PUT",
		HTTPPath:   separator + bucket + separator + object + "?partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + uploadID,
	}
	req, err := newRequest(&uploadOp, a.config, body)
	if err != nil {
		return nil, err
	}
	// Content-MD5 carries the part checksum, base64 encoded per the S3 API.
	req.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
	req.req.ContentLength = size
	return req, nil
}
// uploadPart uploads a part in a multipart upload.
// On success it returns a completePart carrying the part number and the
// hex-encoded MD5 as a quoted ETag, ready for completeMultipartUpload.
func (a apiCore) uploadPart(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (completePart, error) {
	req, err := a.uploadPartRequest(bucket, object, uploadID, md5SumBytes, partNumber, size, body)
	if err != nil {
		return completePart{}, err
	}
	// The ETag is derived locally from the supplied MD5 rather than read
	// back from the response headers.
	cPart := completePart{}
	cPart.PartNumber = partNumber
	cPart.ETag = "\"" + hex.EncodeToString(md5SumBytes) + "\""
	// initiate the request
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return completePart{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return completePart{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
		}
	}
	return cPart, nil
}

1114
Godeps/_workspace/src/github.com/minio/minio-go/api.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,170 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
// bucketHandler is an http.Handler that verifies bucket responses and validates incoming requests
import (
"bytes"
"io"
"net/http"
"strconv"
"time"
)
// bucketHandler fakes the S3 bucket API surface for tests in this package.
type bucketHandler struct {
	// resource is the URL path of the bucket this fake serves, e.g. "/bucket".
	resource string
}
// ServeHTTP dispatches on HTTP method to emulate S3 bucket operations:
// GET lists buckets ("/"), bucket ACLs (?acl) or bucket contents; PUT creates
// a bucket / sets its ACL; HEAD checks existence; DELETE removes it.
// NOTE(review): the value receiver means the DELETE branch's mutation of
// h.resource is lost after the call returns — confirm this is acceptable for
// the tests using this fake (a pointer receiver would break value callers).
func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch {
	case r.Method == "GET":
		switch {
		case r.URL.Path == "/":
			// Canned ListAllMyBucketsResult with a single bucket "bucket".
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListAllMyBucketsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Buckets><Bucket><Name>bucket</Name><CreationDate>2015-05-20T23:05:09.230Z</CreationDate></Bucket></Buckets><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner></ListAllMyBucketsResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
		case r.URL.Path == "/bucket":
			// ?acl returns a canned AccessControlPolicy; otherwise fall
			// through to the duplicate case below to list bucket contents.
			_, ok := r.URL.Query()["acl"]
			if ok {
				response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><AccessControlPolicy><Owner><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>CustomersName@amazon.com</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>CustomersName@amazon.com</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>")
				w.Header().Set("Content-Length", strconv.Itoa(len(response)))
				w.Write(response)
				return
			}
			fallthrough
		case r.URL.Path == "/bucket":
			// Reached only via the fallthrough above: list bucket contents.
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Contents><ETag>\"259d04a13802ae09c7e41be50ccc6baa\"</ETag><Key>object</Key><LastModified>2015-05-21T18:24:21.097Z</LastModified><Size>22061</Size><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner><StorageClass>STANDARD</StorageClass></Contents><Delimiter></Delimiter><EncodingType></EncodingType><IsTruncated>false</IsTruncated><Marker></Marker><MaxKeys>1000</MaxKeys><Name>testbucket</Name><NextMarker></NextMarker><Prefix></Prefix></ListBucketResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
		}
	case r.Method == "PUT":
		switch {
		case r.URL.Path == h.resource:
			// ?acl accepts only the four canned S3 ACL values.
			_, ok := r.URL.Query()["acl"]
			if ok {
				switch r.Header.Get("x-amz-acl") {
				case "public-read-write":
					fallthrough
				case "public-read":
					fallthrough
				case "private":
					fallthrough
				case "authenticated-read":
					w.WriteHeader(http.StatusOK)
					return
				default:
					w.WriteHeader(http.StatusNotImplemented)
					return
				}
			}
			w.WriteHeader(http.StatusOK)
		default:
			w.WriteHeader(http.StatusBadRequest)
		}
	case r.Method == "HEAD":
		switch {
		case r.URL.Path == h.resource:
			w.WriteHeader(http.StatusOK)
		default:
			w.WriteHeader(http.StatusForbidden)
		}
	case r.Method == "DELETE":
		switch {
		case r.URL.Path != h.resource:
			w.WriteHeader(http.StatusNotFound)
		default:
			h.resource = ""
			w.WriteHeader(http.StatusNoContent)
		}
	}
}
// objectHandler is an http.Handler that verifies object responses and validates incoming requests.
type objectHandler struct {
	// resource is the URL path of the object this fake serves, e.g. "/bucket/object".
	resource string
	// data is the object payload expected on PUT and returned on GET.
	data []byte
}
// ServeHTTP emulates S3 object operations: PUT verifies the uploaded bytes
// against h.data, HEAD/GET serve canned metadata (and the payload for GET),
// POST ?uploads starts a fake multipart upload, GET ?uploadId lists parts,
// and DELETE removes the object.
// NOTE(review): the value receiver means the DELETE branch's mutations of
// h.resource/h.data are lost after the call returns — confirm this is
// acceptable for the tests using this fake.
func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch {
	case r.Method == "PUT":
		// Read exactly Content-Length bytes and require them to match h.data.
		length, err := strconv.Atoi(r.Header.Get("Content-Length"))
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		var buffer bytes.Buffer
		_, err = io.CopyN(&buffer, r.Body, int64(length))
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		if !bytes.Equal(h.data, buffer.Bytes()) {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
	case r.Method == "HEAD":
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
	case r.Method == "POST":
		// ?uploads initiates a multipart upload with a fixed upload ID.
		_, ok := r.URL.Query()["uploads"]
		if ok {
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Bucket>example-bucket</Bucket><Key>object</Key><UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId></InitiateMultipartUploadResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
			return
		}
	case r.Method == "GET":
		// ?uploadId returns a canned ListPartsResult for the fixed upload ID.
		_, ok := r.URL.Query()["uploadId"]
		if ok {
			uploadID := r.URL.Query().Get("uploadId")
			if uploadID != "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA" {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Bucket>example-bucket</Bucket><Key>example-object</Key><UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId><Initiator><ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID><DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName></Initiator><Owner><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>someName</DisplayName></Owner><StorageClass>STANDARD</StorageClass><PartNumberMarker>1</PartNumberMarker><NextPartNumberMarker>3</NextPartNumberMarker><MaxParts>2</MaxParts><IsTruncated>true</IsTruncated><Part><PartNumber>2</PartNumber><LastModified>2010-11-10T20:48:34.000Z</LastModified><ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag><Size>10485760</Size></Part><Part><PartNumber>3</PartNumber><LastModified>2010-11-10T20:48:33.000Z</LastModified><ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag><Size>10485760</Size></Part></ListPartsResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
			return
		}
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
		io.Copy(w, bytes.NewReader(h.data))
	case r.Method == "DELETE":
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		h.resource = ""
		h.data = nil
		w.WriteHeader(http.StatusNoContent)
	}
}

View File

@ -0,0 +1,110 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"strings"
"testing"
)
// TestACLTypes verifies that exactly the four canned S3 ACLs are accepted.
func TestACLTypes(t *testing.T) {
	cases := map[string]bool{
		"private":            true,
		"public-read":        true,
		"public-read-write":  true,
		"authenticated-read": true,
		"invalid":            false,
	}
	for name, valid := range cases {
		if got := BucketACL(name).isValidBucketACL(); got != valid {
			t.Fatal("Error")
		}
	}
}
// TestUserAgent checks that SetUserAgent records the supplied program name.
func TestUserAgent(t *testing.T) {
	conf := &Config{}
	conf.SetUserAgent("minio", "1.0", "amd64")
	if !strings.Contains(conf.userAgent, "minio") {
		t.Fatalf("Error")
	}
}
// TestGetRegion checks endpoint-to-region mapping for AWS and non-AWS hosts.
func TestGetRegion(t *testing.T) {
	if getRegion("s3.amazonaws.com") != "us-east-1" {
		t.Fatalf("Error")
	}
	if getRegion("localhost:9000") != "milkyway" {
		t.Fatalf("Error")
	}
}
// TestPartSize checks calculatePartSize bounds: parts never exceed the 5GB
// S3 maximum for enormous objects, and stay at the 5MB minimum for objects
// small enough not to need larger parts.
func TestPartSize(t *testing.T) {
	var maxPartSize int64 = 1024 * 1024 * 1024 * 5
	partSize := calculatePartSize(5000000000000000000)
	if partSize > minimumPartSize {
		if partSize > maxPartSize {
			t.Fatal("invalid result, cannot be bigger than maxPartSize 5GB")
		}
	}
	// 50GB should still resolve to the minimum part size.
	partSize = calculatePartSize(50000000000)
	if partSize > minimumPartSize {
		t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MB")
	}
}
// TestURLEncoding checks getURLEncodedPath against known tricky object names:
// percent signs, multi-byte UTF-8, reserved characters, spaces and pluses.
func TestURLEncoding(t *testing.T) {
	cases := map[string]string{
		"bigfile-1._%":   "bigfile-1._%25",
		"本語":             "%E6%9C%AC%E8%AA%9E",
		"本語.1":           "%E6%9C%AC%E8%AA%9E.1",
		">123>3123123":   "%3E123%3E3123123",
		"test 1 2.txt":   "test%201%202.txt",
		"test++ 1.txt":   "test%2B%2B%201.txt",
	}
	for name, encoded := range cases {
		if got := getURLEncodedPath(name); got != encoded {
			t.Errorf("Error")
		}
	}
}

View File

@ -0,0 +1,287 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
"io"
"net/http/httptest"
"testing"
"time"
"github.com/minio/minio-go"
)
// TestBucketOperations exercises the bucket API surface (make/exists/ACL/
// list/remove) against the bucketHandler fake serving "/bucket".
func TestBucketOperations(t *testing.T) {
	bucket := bucketHandler(bucketHandler{
		resource: "/bucket",
	})
	server := httptest.NewServer(bucket)
	defer server.Close()
	a, err := minio.New(minio.Config{Endpoint: server.URL})
	if err != nil {
		t.Fatal("Error")
	}
	err = a.MakeBucket("bucket", "private")
	if err != nil {
		t.Fatal("Error")
	}
	err = a.BucketExists("bucket")
	if err != nil {
		t.Fatal("Error")
	}
	// The fake answers 403 for any path other than its resource.
	err = a.BucketExists("bucket1")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "Access Denied" {
		t.Fatal("Error")
	}
	err = a.SetBucketACL("bucket", "public-read-write")
	if err != nil {
		t.Fatal("Error")
	}
	// The fake's canned ACL grants FULL_CONTROL to the owner only, which
	// the client reports as "private".
	acl, err := a.GetBucketACL("bucket")
	if err != nil {
		t.Fatal("Error")
	}
	if acl != minio.BucketACL("private") {
		t.Fatal("Error")
	}
	for b := range a.ListBuckets() {
		if b.Err != nil {
			t.Fatal(b.Err.Error())
		}
		if b.Stat.Name != "bucket" {
			t.Fatal("Error")
		}
	}
	for o := range a.ListObjects("bucket", "", true) {
		if o.Err != nil {
			t.Fatal(o.Err.Error())
		}
		if o.Stat.Key != "object" {
			t.Fatal("Error")
		}
	}
	err = a.RemoveBucket("bucket")
	if err != nil {
		t.Fatal("Error")
	}
	// Removing an unknown bucket must surface NoSuchBucket's message.
	err = a.RemoveBucket("bucket1")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "The specified bucket does not exist." {
		t.Fatal("Error")
	}
}
// TestBucketOperationsFail verifies client-side rejection of invalid bucket
// names ("$$$", trailing dot, "??", etc.) before any request is sent.
func TestBucketOperationsFail(t *testing.T) {
	bucket := bucketHandler(bucketHandler{
		resource: "/bucket",
	})
	server := httptest.NewServer(bucket)
	defer server.Close()
	a, err := minio.New(minio.Config{Endpoint: server.URL})
	if err != nil {
		t.Fatal("Error")
	}
	err = a.MakeBucket("bucket$$$", "private")
	if err == nil {
		t.Fatal("Error")
	}
	err = a.BucketExists("bucket.")
	if err == nil {
		t.Fatal("Error")
	}
	err = a.SetBucketACL("bucket-.", "public-read-write")
	if err == nil {
		t.Fatal("Error")
	}
	_, err = a.GetBucketACL("bucket??")
	if err == nil {
		t.Fatal("Error")
	}
	for o := range a.ListObjects("bucket??", "", true) {
		if o.Err == nil {
			t.Fatal(o.Err.Error())
		}
	}
	err = a.RemoveBucket("bucket??")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "The specified bucket is not valid." {
		t.Fatal("Error")
	}
}
// TestObjectOperations exercises object-level calls
// (put / stat / get / remove) against the local objectHandler mock.
func TestObjectOperations(t *testing.T) {
	object := objectHandler(objectHandler{
		resource: "/bucket/object",
		data:     []byte("Hello, World"),
	})
	server := httptest.NewServer(object)
	defer server.Close()
	a, err := minio.New(minio.Config{Endpoint: server.URL})
	if err != nil {
		t.Fatal("Error")
	}
	data := []byte("Hello, World")
	err = a.PutObject("bucket", "object", "", int64(len(data)), bytes.NewReader(data))
	if err != nil {
		t.Fatal("Error")
	}
	metadata, err := a.StatObject("bucket", "object")
	if err != nil {
		t.Fatal("Error")
	}
	if metadata.Key != "object" {
		t.Fatal("Error")
	}
	// expected ETag for the "Hello, World" payload
	if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" {
		t.Fatal("Error")
	}
	reader, metadata, err := a.GetObject("bucket", "object")
	if err != nil {
		t.Fatal("Error")
	}
	if metadata.Key != "object" {
		t.Fatal("Error")
	}
	if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" {
		t.Fatal("Error")
	}
	var buffer bytes.Buffer
	// BUGFIX: the original discarded the io.Copy error, so a failed read
	// was misreported as a content mismatch on the next assertion.
	if _, err = io.Copy(&buffer, reader); err != nil {
		t.Fatal("Error")
	}
	if !bytes.Equal(buffer.Bytes(), data) {
		t.Fatal("Error")
	}
	err = a.RemoveObject("bucket", "object")
	if err != nil {
		t.Fatal("Error")
	}
	// removing an unknown key must surface the server's error message
	err = a.RemoveObject("bucket", "object1")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "The specified key does not exist." {
		t.Fatal("Error")
	}
}
// TestPresignedURL verifies that presigned GET URLs require credentials
// and an expiry within the accepted window (rejecting 0s and 604801s).
func TestPresignedURL(t *testing.T) {
	object := objectHandler(objectHandler{
		resource: "/bucket/object",
		data:     []byte("Hello, World"),
	})
	server := httptest.NewServer(object)
	defer server.Close()
	a, err := minio.New(minio.Config{Endpoint: server.URL})
	if err != nil {
		t.Fatal("Error")
	}
	// should error out for invalid access keys
	_, err = a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second)
	if err == nil {
		t.Fatal("Error")
	}
	// retry with credentials set; presigning should now succeed
	a, err = minio.New(minio.Config{
		Endpoint:        server.URL,
		AccessKeyID:     "accessKey",
		SecretAccessKey: "secretKey",
	})
	if err != nil {
		t.Fatal("Error")
	}
	url, err := a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second)
	if err != nil {
		t.Fatal("Error")
	}
	if url == "" {
		t.Fatal("Error")
	}
	// a zero expiry must be rejected
	_, err = a.PresignedGetObject("bucket", "object", time.Duration(0)*time.Second)
	if err == nil {
		t.Fatal("Error")
	}
	// an expiry beyond 604800 seconds (seven days) must be rejected
	_, err = a.PresignedGetObject("bucket", "object", time.Duration(604801)*time.Second)
	if err == nil {
		t.Fatal("Error")
	}
}
// TestErrorResponse decodes a sample S3 XML error body and checks that
// every field of the parsed ErrorResponse round-trips correctly.
func TestErrorResponse(t *testing.T) {
	errorResponse := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><Error><Code>AccessDenied</Code><Message>Access Denied</Message><Resource>/mybucket/myphoto.jpg</Resource><RequestId>F19772218238A85A</RequestId><HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId></Error>")
	errorReader := bytes.NewReader(errorResponse)
	// decoding an error body must yield a non-nil error value
	err := minio.BodyToErrorResponse(errorReader, "application/xml")
	if err == nil {
		t.Fatal("Error")
	}
	// Error() surfaces the <Message> element
	if err.Error() != "Access Denied" {
		t.Fatal("Error")
	}
	resp := minio.ToErrorResponse(err)
	// valid all fields
	if resp == nil {
		t.Fatal("Error")
	}
	if resp.Code != "AccessDenied" {
		t.Fatal("Error")
	}
	if resp.RequestID != "F19772218238A85A" {
		t.Fatal("Error")
	}
	if resp.Message != "Access Denied" {
		t.Fatal("Error")
	}
	if resp.Resource != "/mybucket/myphoto.jpg" {
		t.Fatal("Error")
	}
	if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" {
		t.Fatal("Error")
	}
	// both serializations must produce non-empty output
	if resp.ToXML() == "" {
		t.Fatal("Error")
	}
	if resp.ToJSON() == "" {
		t.Fatal("Error")
	}
}

View File

@ -0,0 +1,41 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\minio\minio-go
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- rd C:\Go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.5.1.windows-amd64.zip
- 7z x go1.5.1.windows-amd64.zip -oC:\ >nul
- go version
- go env
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/vet
- go get -u github.com/fzipp/gocyclo
- go get -u github.com/remyoudompheng/go-misc/deadcode
# to run your custom scripts instead of automatic MSBuild
build_script:
- go vet ./...
- gofmt -s -l .
- golint github.com/minio/minio-go...
- gocyclo -over 30 .
- deadcode
- go test
- go test -race
# to disable automatic tests
test: off
# to disable deployment
deploy: off

View File

@ -0,0 +1,75 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// BucketACL - bucket level access control.
type BucketACL string

// Supported canned ACL values for buckets.
const (
	bucketPrivate       = BucketACL("private")
	bucketReadOnly      = BucketACL("public-read")
	bucketPublic        = BucketACL("public-read-write")
	bucketAuthenticated = BucketACL("authenticated-read")
)

// String returns the ACL value, defaulting to "private" when unset.
func (b BucketACL) String() string {
	if string(b) == "" {
		return "private"
	}
	return string(b)
}

// isValidBucketACL reports whether the ACL is one of the supported
// canned values (an empty ACL counts as the "private" default).
func (b BucketACL) isValidBucketACL() bool {
	// idiomatic value switch replaces the previous
	// `switch true` + fallthrough chain
	switch b {
	case bucketPrivate, bucketReadOnly, bucketPublic, bucketAuthenticated:
		return true
	case BucketACL(""):
		// by default its "private"
		return true
	default:
		return false
	}
}

// isPrivate reports whether the acl is "private".
func (b BucketACL) isPrivate() bool {
	return b == bucketPrivate
}

// isReadOnly reports whether the acl is "public-read".
func (b BucketACL) isReadOnly() bool {
	return b == bucketReadOnly
}

// isPublic reports whether the acl is "public-read-write".
func (b BucketACL) isPublic() bool {
	return b == bucketPublic
}

// isAuthenticated reports whether the acl is "authenticated-read".
func (b BucketACL) isAuthenticated() bool {
	return b == bucketAuthenticated
}

View File

@ -0,0 +1,136 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"io"
)
// part - message structure for results from the MultiPart
type part struct {
MD5Sum []byte
ReadSeeker io.ReadSeeker
Err error
Len int64
Num int // part number
}
// skipPart - skipping uploaded parts
type skipPart struct {
md5sum []byte
partNumber int
}
// chopper reads from io.Reader, partitions the data into chunks of given chunksize, and sends
// each chunk as io.ReadSeeker to the caller over a channel
//
// This method runs until an EOF or error occurs. If an error occurs,
// the method sends the error over the channel and returns.
// Before returning, the channel is always closed.
//
// additionally this function also skips list of parts if provided
func chopper(reader io.Reader, chunkSize int64, skipParts []skipPart) <-chan part {
ch := make(chan part, 3)
go chopperInRoutine(reader, chunkSize, skipParts, ch)
return ch
}
func chopperInRoutine(reader io.Reader, chunkSize int64, skipParts []skipPart, ch chan part) {
defer close(ch)
p := make([]byte, chunkSize)
n, err := io.ReadFull(reader, p)
if err == io.EOF || err == io.ErrUnexpectedEOF { // short read, only single part return
m := md5.Sum(p[0:n])
ch <- part{
MD5Sum: m[:],
ReadSeeker: bytes.NewReader(p[0:n]),
Err: nil,
Len: int64(n),
Num: 1,
}
return
}
// catastrophic error send error and return
if err != nil {
ch <- part{
ReadSeeker: nil,
Err: err,
Num: 0,
}
return
}
// send the first part
var num = 1
md5SumBytes := md5.Sum(p)
sp := skipPart{
partNumber: num,
md5sum: md5SumBytes[:],
}
if !isPartNumberUploaded(sp, skipParts) {
ch <- part{
MD5Sum: md5SumBytes[:],
ReadSeeker: bytes.NewReader(p),
Err: nil,
Len: int64(n),
Num: num,
}
}
for err == nil {
var n int
p := make([]byte, chunkSize)
n, err = io.ReadFull(reader, p)
if err != nil {
if err != io.EOF && err != io.ErrUnexpectedEOF { // catastrophic error
ch <- part{
ReadSeeker: nil,
Err: err,
Num: 0,
}
return
}
}
num++
md5SumBytes := md5.Sum(p[0:n])
sp := skipPart{
partNumber: num,
md5sum: md5SumBytes[:],
}
if isPartNumberUploaded(sp, skipParts) {
continue
}
ch <- part{
MD5Sum: md5SumBytes[:],
ReadSeeker: bytes.NewReader(p[0:n]),
Err: nil,
Len: int64(n),
Num: num,
}
}
}
// to verify if partNumber is part of the skip part list
func isPartNumberUploaded(part skipPart, skipParts []skipPart) bool {
for _, p := range skipParts {
if p.partNumber == part.partNumber && bytes.Equal(p.md5sum, part.md5sum) {
return true
}
}
return false
}

View File

@ -0,0 +1,115 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"encoding/xml"
"io"
"strings"
"time"
)
// decoder provides a unified decoding method interface
type decoder interface {
Decode(v interface{}) error
}
// acceptTypeDecoder provide decoded value in given acceptType
func acceptTypeDecoder(body io.Reader, acceptType string, v interface{}) error {
var d decoder
switch {
case acceptType == "application/xml":
d = xml.NewDecoder(body)
case acceptType == "application/json":
d = json.NewDecoder(body)
default:
d = xml.NewDecoder(body)
}
return d.Decode(v)
}
// sum256Reader calculate sha256 sum for an input read seeker
func sum256Reader(reader io.ReadSeeker) ([]byte, error) {
h := sha256.New()
var err error
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
for err == nil {
length := 0
byteBuffer := make([]byte, 1024*1024)
length, err = reader.Read(byteBuffer)
byteBuffer = byteBuffer[0:length]
h.Write(byteBuffer)
}
if err != io.EOF {
return nil, err
}
return h.Sum(nil), nil
}
// sum256 calculate sha256 sum for an input byte array
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// getSigningKey derives the AWS signature v4 signing key for the given
// secret, region and date by chaining HMAC-SHA256 over the v4 labels.
func getSigningKey(secret, region string, t time.Time) []byte {
	key := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
	key = sumHMAC(key, []byte(region))
	key = sumHMAC(key, []byte("s3"))
	return sumHMAC(key, []byte("aws4_request"))
}
// getSignature returns the final request signature in hexadecimal form.
func getSignature(signingKey []byte, stringToSign string) string {
	mac := sumHMAC(signingKey, []byte(stringToSign))
	return hex.EncodeToString(mac)
}
// getScope builds the "<date>/<region>/s3/aws4_request" scope string
// used by signature v4.
func getScope(region string, t time.Time) string {
	fields := []string{t.Format(yyyymmdd), region, "s3", "aws4_request"}
	return strings.Join(fields, "/")
}
// getCredential returns the "<accessKeyID>/<scope>" credential string
// used in the v4 Authorization header.
func getCredential(accessKeyID, region string, t time.Time) string {
	credential := accessKeyID + "/" + getScope(region, t)
	return credential
}

View File

@ -0,0 +1,181 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"time"
)
// listAllMyBucketsResult container for listBuckets response
type listAllMyBucketsResult struct {
	// Container for one or more buckets.
	Buckets struct {
		Bucket []BucketStat
	}
	Owner owner
}

// owner container for bucket owner information
type owner struct {
	DisplayName string
	ID          string
}

// commonPrefix container for prefix response
type commonPrefix struct {
	Prefix string
}

// listBucketResult container for listObjects response
type listBucketResult struct {
	CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you have specified a delimiter
	Contents       []ObjectStat   // Metadata about each object returned
	Delimiter      string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	Marker      string
	MaxKeys     int64
	Name        string

	// When response is truncated (the IsTruncated element value in the response
	// is true), you can use the key name in this field as marker in the subsequent
	// request to get next set of objects. Object storage lists objects in alphabetical
	// order Note: This element is returned only if you have delimiter request parameter
	// specified. If response does not include the NextMaker and it is truncated,
	// you can use the value of the last Key in the response as the marker in the
	// subsequent request to get the next set of object keys.
	NextMarker string
	Prefix     string
}

// listMultipartUploadsResult container for ListMultipartUploads response
type listMultipartUploadsResult struct {
	Bucket             string
	KeyMarker          string
	UploadIDMarker     string `xml:"UploadIdMarker"`
	NextKeyMarker      string
	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
	EncodingType       string
	MaxUploads         int64
	IsTruncated        bool
	Uploads            []ObjectMultipartStat `xml:"Upload"`
	Prefix             string
	Delimiter          string
	CommonPrefixes     []commonPrefix // A response can contain CommonPrefixes only if you specify a delimiter
}

// initiator container for who initiated multipart upload
type initiator struct {
	ID          string
	DisplayName string
}

// partMetadata container for particular part of an object
type partMetadata struct {
	// Part number identifies the part.
	PartNumber int

	// Date and time the part was uploaded.
	LastModified time.Time

	// Entity tag returned when the part was uploaded, usually md5sum of the part
	ETag string

	// Size of the uploaded part data.
	Size int64
}

// listObjectPartsResult container for ListObjectParts response.
type listObjectPartsResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`

	Initiator initiator
	Owner     owner

	StorageClass         string
	PartNumberMarker     int
	NextPartNumberMarker int
	MaxParts             int

	// Indicates whether the returned list of parts is truncated.
	IsTruncated bool
	Parts       []partMetadata `xml:"Part"`

	EncodingType string
}

// initiateMultipartUploadResult container for InitiateMultiPartUpload response.
type initiateMultipartUploadResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`
}

// completeMultipartUploadResult container for completed multipart upload response.
type completeMultipartUploadResult struct {
	Location string
	Bucket   string
	Key      string
	ETag     string
}

// completePart sub container lists individual part numbers and their md5sum, part of completeMultipartUpload.
type completePart struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`

	// Part number identifies the part.
	PartNumber int
	ETag       string
}

// completeMultipartUpload container for completing multipart upload
type completeMultipartUpload struct {
	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
	Parts   []completePart `xml:"Part"`
}

// createBucketConfiguration container for bucket configuration
type createBucketConfiguration struct {
	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
	Location string   `xml:"LocationConstraint"`
}

// grant container for a single ACL grant: the grantee and the
// permission granted to it.
type grant struct {
	Grantee struct {
		ID           string
		DisplayName  string
		EmailAddress string
		Type         string
		URI          string
	}
	Permission string
}

// accessControlPolicy container for a bucket/object ACL document:
// the list of grants plus the resource owner.
type accessControlPolicy struct {
	AccessControlList struct {
		Grant []grant
	}
	Owner owner
}

View File

@ -0,0 +1,168 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/json"
"encoding/xml"
"io"
"regexp"
"strings"
"unicode/utf8"
)
/* **** SAMPLE ERROR RESPONSE ****
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<Resource>/mybucket/myphoto.jpg</Resource>
<RequestId>F19772218238A85A</RequestId>
<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
</Error>
*/
// ErrorResponse is the type error returned by some API operations.
type ErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Resource string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`
}
// ToErrorResponse returns parsed ErrorResponse struct, if input is nil or not ErrorResponse return value is nil
// this fuction is useful when some one wants to dig deeper into the error structures over the network.
//
// for example:
//
// import s3 "github.com/minio/minio-go"
// ...
// ...
// ..., err := s3.GetObject(...)
// if err != nil {
// resp := s3.ToErrorResponse(err)
// fmt.Println(resp.ToXML())
// }
// ...
// ...
func ToErrorResponse(err error) *ErrorResponse {
switch err := err.(type) {
case ErrorResponse:
return &err
default:
return nil
}
}
// ToXML send raw xml marshalled as string
func (e ErrorResponse) ToXML() string {
b, err := xml.Marshal(&e)
if err != nil {
panic(err)
}
return string(b)
}
// ToJSON send raw json marshalled as string
func (e ErrorResponse) ToJSON() string {
b, err := json.Marshal(&e)
if err != nil {
panic(err)
}
return string(b)
}
// Error formats HTTP error string
func (e ErrorResponse) Error() string {
return e.Message
}
// BodyToErrorResponse decodes an error body of the given acceptType
// into an ErrorResponse, returned as an error value.
func BodyToErrorResponse(errBody io.Reader, acceptType string) error {
	var errResp ErrorResponse
	if err := acceptTypeDecoder(errBody, acceptType, &errResp); err != nil {
		return err
	}
	return errResp
}
// invalidBucketError - returns an InvalidBucketName ErrorResponse when
// the bucket name is not acceptable, nil otherwise.
func invalidBucketError(bucket string) error {
	// verify bucket name in accordance with
	// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
	isValidBucket := func(bucket string) bool {
		if len(bucket) < 3 || len(bucket) > 63 {
			return false
		}
		if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
			return false
		}
		// no consecutive dots anywhere in the name
		if match, _ := regexp.MatchString("\\.\\.", bucket); match {
			return false
		}
		// We don't support buckets with '.' in them
		match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
		return match
	}
	if !isValidBucket(strings.TrimSpace(bucket)) {
		// no resource since bucket is empty string
		return ErrorResponse{
			Code:      "InvalidBucketName",
			Message:   "The specified bucket is not valid.",
			RequestID: "minio",
		}
	}
	return nil
}
// invalidObjectError - returns a NoSuchKey ErrorResponse when the
// object name is empty (or whitespace only), nil otherwise.
func invalidObjectError(object string) error {
	// TrimSpace already maps "" to "", so the former extra `object == ""`
	// check was redundant
	if strings.TrimSpace(object) == "" {
		// no resource since object name is empty
		return ErrorResponse{
			Code:      "NoSuchKey",
			Message:   "The specified key does not exist.",
			RequestID: "minio",
		}
	}
	return nil
}
// invalidArgumentError - returns an InvalidArgument ErrorResponse when
// arg is empty or not valid UTF-8, nil otherwise.
func invalidArgumentError(arg string) error {
	errResp := ErrorResponse{
		Code:      "InvalidArgument",
		Message:   "Invalid Argument",
		RequestID: "minio",
	}
	// TrimSpace already maps "" to "", so the former extra `arg == ""`
	// check was redundant
	if strings.TrimSpace(arg) == "" {
		// no resource since arg is empty string
		return errResp
	}
	if !utf8.ValidString(arg) {
		// add resource to reply back with invalid string
		errResp.Resource = arg
		return errResp
	}
	return nil
}

View File

@ -0,0 +1,40 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main connects anonymously to play.minio.io and checks that bucket
// "mybucket" exists, exiting with an error log on failure.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	err = s3Client.BucketExists("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,41 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main connects anonymously to play.minio.io and prints the ACL of
// bucket "mybucket".
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	acl, err := s3Client.GetBucketACL("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(acl)
}

View File

@ -0,0 +1,51 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
// main downloads "mybucket/myobject" from play.minio.io into the local
// file "testfile", copying exactly the object's stated size.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	reader, stat, err := s3Client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	localfile, err := os.Create("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer localfile.Close()
	// copy exactly stat.Size bytes from the object stream to disk
	if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,51 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
// main downloads a byte range (offset 0, length 10) of
// "mybucket/myobject" from play.minio.io into the local file "testfile".
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10)
	if err != nil {
		log.Fatalln(err)
	}
	localfile, err := os.Create("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer localfile.Close()
	// copy exactly stat.Size bytes of the returned range to disk
	if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,41 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main connects anonymously to play.minio.io and prints metadata for
// every bucket streamed over the ListBuckets channel.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	for bucket := range s3Client.ListBuckets() {
		if bucket.Err != nil {
			log.Fatalln(bucket.Err)
		}
		log.Println(bucket.Stat)
	}
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main lists, recursively, all incomplete multipart uploads under
// "mybucket/myobject" using the supplied credentials.
func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Recursive
	for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) {
		if multipartObject.Err != nil {
			log.Fatalln(multipartObject.Err)
		}
		log.Println(multipartObject)
	}
}

View File

@ -0,0 +1,41 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main recursively lists every object in "mybucket" on play.minio.io,
// printing each object's metadata.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	for object := range s3Client.ListObjects("mybucket", "", true) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		log.Println(object.Stat)
	}
}

View File

@ -0,0 +1,40 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main creates bucket "mybucket" on play.minio.io with the default
// (empty) ACL.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	err = s3Client.MakeBucket("mybucket", "")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,52 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
// main uploads the local file "testfile" to play.minio.io as
// "mybucket/myobject" with content type application/octet-stream.
func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	object, err := os.Open("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer object.Close()
	objectInfo, err := object.Stat()
	if err != nil {
		// explicit Close: log.Fatalln exits the process without
		// running the deferred Close above
		object.Close()
		log.Fatalln(err)
	}
	err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object)
	if err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main removes bucket "mybucket" from play.minio.io using the supplied
// credentials.
func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	err = s3Client.RemoveBucket("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,41 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main aborts incomplete multipart uploads for mybucket/myobject on the
// play endpoint, draining the error channel the call returns.
func main() {
	cfg := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// RemoveIncompleteUpload reports errors over a channel; any non-nil
	// value aborts the program.
	for e := range client.RemoveIncompleteUpload("mybucket", "myobject") {
		if e != nil {
			log.Fatalln(e)
		}
	}
	log.Println("Success")
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main deletes the object mybucket/myobject on the play.minio.io
// endpoint and prints Success on completion.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://play.minio.io:9000",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.RemoveObject("mybucket", "myobject"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,40 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main sets the ACL of "mybucket" to public-read-write on play.minio.io.
func main() {
	cfg := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,40 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main fetches and prints metadata for mybucket/myobject on play.minio.io.
func main() {
	cfg := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	info, err := client.StatObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(info)
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main checks whether "mybucket" exists on Amazon S3; any non-nil error
// aborts, otherwise Success is printed.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.BucketExists("mybucket"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main retrieves and prints the ACL of "mybucket" on Amazon S3.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	acl, err := client.GetBucketACL("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(acl)
}

View File

@ -0,0 +1,53 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
// main downloads mybucket/myobject from Amazon S3 into the local file
// "testfile", copying exactly the advertised object size.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	reader, info, err := client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	out, err := os.Create("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer out.Close()
	// Copy exactly info.Size bytes; a short read surfaces as an error.
	if _, err = io.CopyN(out, reader, info.Size); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,53 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
// main downloads bytes 0-9 of mybucket/myobject from Amazon S3 into the
// local file "testfile".
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// Request a 10-byte range starting at offset 0.
	reader, info, err := client.GetPartialObject("mybucket", "myobject", 0, 10)
	if err != nil {
		log.Fatalln(err)
	}
	out, err := os.Create("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer out.Close()
	if _, err = io.CopyN(out, reader, info.Size); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main lists every bucket visible to the credentials and prints each
// bucket's metadata.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// ListBuckets streams results over a channel; Err marks a failure.
	for b := range client.ListBuckets() {
		if b.Err != nil {
			log.Fatalln(b.Err)
		}
		log.Println(b.Stat)
	}
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main recursively lists incomplete multipart uploads under
// mybucket/myobject on Amazon S3 and prints each entry.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// The final argument requests a recursive listing.
	for upload := range client.ListIncompleteUploads("mybucket", "myobject", true) {
		if upload.Err != nil {
			log.Fatalln(upload.Err)
		}
		log.Println(upload)
	}
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main recursively lists all objects in "mybucket" (empty prefix) and
// prints each object's metadata.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// Empty prefix + recursive=true walks the whole bucket.
	for object := range client.ListObjects("mybucket", "", true) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		log.Println(object.Stat)
	}
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main creates "mybucket" on Amazon S3 (second argument left empty, as
// in the original example) and prints Success.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.MakeBucket("mybucket", ""); err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
// main generates a presigned GET URL for mybucket/myobject, valid for
// 1000 seconds, and prints it.
func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Renamed from "string", which shadowed the predeclared type identifier.
	presignedURL, err := s3Client.PresignedGetObject("mybucket", "myobject", time.Duration(1000)*time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)
}

View File

@ -0,0 +1,54 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"time"
"github.com/minio/minio-go"
)
// main builds a POST policy for uploading to mybucket/myobject (expiring
// in 10 days), asks for presigned form fields, and prints a ready-to-run
// curl command line.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	policy := minio.NewPostPolicy()
	policy.SetKey("myobject")
	policy.SetBucket("mybucket")
	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
	formData, err := client.PresignedPostPolicy(policy)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Emit each form field as a curl -F flag.
	fmt.Printf("curl ")
	for field, value := range formData {
		fmt.Printf("-F %s=%s ", field, value)
	}
	fmt.Printf("-F file=@/etc/bashrc ")
	fmt.Print(cfg.Endpoint + "/mybucket\n")
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
// main generates a presigned PUT URL for mybucket/myobject, valid for
// 1000 seconds, and prints it.
func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Renamed from "string", which shadowed the predeclared type identifier.
	presignedURL, err := s3Client.PresignedPutObject("mybucket", "myobject", time.Duration(1000)*time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)
}

View File

@ -0,0 +1,54 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
// main uploads the local file "testfile" to Amazon S3 as
// mybucket/myobject with a generic binary content type.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	source, err := os.Open("testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer source.Close()
	info, err := source.Stat()
	if err != nil {
		// log.Fatalln exits via os.Exit, which skips defers, so close explicitly.
		source.Close()
		log.Fatalln(err)
	}
	if err = client.PutObject("mybucket", "myobject", "application/octet-stream", info.Size(), source); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main deletes the bucket "mybucket" on Amazon S3 and prints Success.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.RemoveBucket("mybucket"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,43 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main aborts incomplete multipart uploads for mybucket/myobject on
// Amazon S3, draining the error channel the call returns.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	// Any non-nil value from the channel aborts the program.
	for e := range client.RemoveIncompleteUpload("mybucket", "myobject") {
		if e != nil {
			log.Fatalln(e)
		}
	}
	log.Println("Success")
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main deletes the object mybucket/myobject on Amazon S3 and prints
// Success on completion.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.RemoveObject("mybucket", "myobject"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main sets the ACL of "mybucket" to public-read-write on Amazon S3.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,42 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
// main fetches and prints metadata for mybucket/myobject on Amazon S3.
func main() {
	cfg := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	client, err := minio.New(cfg)
	if err != nil {
		log.Fatalln(err)
	}
	info, err := client.StatObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(info)
}

View File

@ -0,0 +1,152 @@
package minio
import (
"encoding/base64"
"errors"
"fmt"
"strings"
"time"
)
// expirationDateFormat date format for expiration key in json policy
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
// Policy explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html

// policy is a single POST-policy condition triple, serialized by
// marshalJSON as ["matchType","key","value"] inside "conditions".
type policy struct {
	matchType string // condition operator, e.g. "eq" or "starts-with"
	key       string // form-field name being constrained, e.g. "$key"
	value     string // value (or prefix) the field must match
}

// PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
type PostPolicy struct {
	expiration time.Time // expiration date and time of the POST policy.
	policies   []policy  // accumulated match conditions
	// contentLengthRange bounds accepted upload sizes; emitted as a
	// "content-length-range" condition when either bound is non-zero.
	contentLengthRange struct {
		min int
		max int
	}
	// Post form data
	formData map[string]string // form fields mirrored from the condition setters
}
// NewPostPolicy returns an empty PostPolicy ready for its condition
// setters (SetKey, SetBucket, SetExpires, ...).
func NewPostPolicy() *PostPolicy {
	return &PostPolicy{
		policies: make([]policy, 0),
		formData: make(map[string]string),
	}
}
// SetExpires records the policy's expiration instant; the zero time is
// rejected as invalid.
func (p *PostPolicy) SetExpires(t time.Time) error {
	if !t.IsZero() {
		p.expiration = t
		return nil
	}
	return errors.New("time input invalid")
}
// SetKey adds an exact-match condition on the object name and mirrors it
// into the form data. Blank (empty or all-whitespace) keys are rejected.
func (p *PostPolicy) SetKey(key string) error {
	// TrimSpace(key) == "" already covers key == "", so one check suffices.
	if strings.TrimSpace(key) == "" {
		return errors.New("key invalid")
	}
	// Avoid shadowing the package-level type name "policy" with a local.
	p.policies = append(p.policies, policy{"eq", "$key", key})
	p.formData["key"] = key
	return nil
}
// SetKeyStartsWith adds a prefix-match condition on the object name and
// mirrors the prefix into the form data. Blank prefixes are rejected.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
	// TrimSpace already covers the empty string; the extra == "" was redundant.
	if strings.TrimSpace(keyStartsWith) == "" {
		return errors.New("key-starts-with invalid")
	}
	p.policies = append(p.policies, policy{"starts-with", "$key", keyStartsWith})
	p.formData["key"] = keyStartsWith
	return nil
}
// SetBucket adds an exact-match condition on the bucket name and mirrors
// it into the form data. Blank bucket names are rejected.
func (p *PostPolicy) SetBucket(bucket string) error {
	// TrimSpace already covers the empty string; the extra == "" was redundant.
	if strings.TrimSpace(bucket) == "" {
		return errors.New("bucket invalid")
	}
	p.policies = append(p.policies, policy{"eq", "$bucket", bucket})
	p.formData["bucket"] = bucket
	return nil
}
// SetContentType adds an exact-match condition on the Content-Type form
// field, validating it through addNewPolicy. Blank values are rejected.
func (p *PostPolicy) SetContentType(contentType string) error {
	// TrimSpace already covers the empty string; the extra == "" was redundant.
	if strings.TrimSpace(contentType) == "" {
		return errors.New("contentType invalid")
	}
	if err := p.addNewPolicy(policy{"eq", "$Content-Type", contentType}); err != nil {
		return err
	}
	p.formData["Content-Type"] = contentType
	return nil
}
// SetContentLength restricts uploads to sizes within [min, max] bytes.
// Both bounds must be non-negative and min must not exceed max.
func (p *PostPolicy) SetContentLength(min, max int) error {
	switch {
	case min > max:
		return errors.New("minimum cannot be bigger than maximum")
	case min < 0:
		return errors.New("minimum cannot be negative")
	case max < 0:
		return errors.New("maximum cannot be negative")
	}
	p.contentLengthRange.min = min
	p.contentLengthRange.max = max
	return nil
}
// addNewPolicy validates and appends a condition triple; all three
// fields must be non-empty.
func (p *PostPolicy) addNewPolicy(po policy) error {
	switch "" {
	case po.matchType, po.key, po.value:
		return errors.New("policy invalid")
	}
	p.policies = append(p.policies, po)
	return nil
}
// String implements fmt.Stringer, rendering the policy as its JSON form.
func (p PostPolicy) String() string {
	raw := p.marshalJSON()
	return string(raw)
}
// marshalJSON renders the policy document as raw JSON bytes. Condition
// values are formatted verbatim (not JSON-escaped), and the comma after
// the expiration field is emitted even when there are no conditions —
// both preserved from the historical output format.
func (p PostPolicy) marshalJSON() []byte {
	conditions := []string{}
	for _, po := range p.policies {
		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.key, po.value))
	}
	// A content-length-range condition is included only when set.
	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
			p.contentLengthRange.min, p.contentLengthRange.max))
	}
	doc := `{"expiration":"` + p.expiration.Format(expirationDateFormat) + `",`
	if len(conditions) > 0 {
		doc += `"conditions":[` + strings.Join(conditions, ",") + "]"
	}
	doc += "}"
	return []byte(doc)
}
// base64 returns the policy's JSON document encoded with standard base64.
func (p PostPolicy) base64() string {
	raw := p.marshalJSON()
	return base64.StdEncoding.EncodeToString(raw)
}

View File

@ -0,0 +1,498 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"errors"
"io"
"io/ioutil"
"net/http"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
)
// operation - rest operation
type operation struct {
	HTTPServer string // scheme+host portion of the request URL
	HTTPMethod string // HTTP verb; "" defaults to POST in the request constructors
	HTTPPath   string // path, possibly containing bucket, object and query parts
}

// request - a http request
type request struct {
	req     *http.Request // the underlying HTTP request being assembled
	config  *Config       // client configuration (credentials, endpoint, agent)
	body    io.ReadSeeker // request payload; nil for presigned requests
	expires string        // expiry value recorded for presigned requests
}
const (
authHeader = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102"
)
///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent.
///
/// Content-Length:
///
/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature.
///
/// Content-Type:
///
/// Signing this header causes quite a number of problems in browser environments, where browsers
/// like to modify and normalize the content-type header in different ways. There is more information
/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
/// and reduces the possibility of future bugs
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
// ignoredHeaders lists headers excluded from signing; see the rationale
// excerpted above from aws-sdk-js issue #659.
var ignoredHeaders = map[string]bool{
	"Authorization":  true,
	"Content-Type":   true,
	"Content-Length": true,
	"User-Agent":     true,
}
// reservedObjectNames matches paths made entirely of characters that need
// no percent-encoding; compiled once instead of on every call.
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")

// getURLEncodedPath encodes a URL path, supporting arbitrary UTF-8 runes
// (which url.Parse()/url.Encode() of the time did not handle).
// Unreserved characters (RFC 3986 §2.3: ALPHA / DIGIT / "-" / "_" / "." /
// "~", plus "/") pass through untouched; every other rune is emitted as
// the uppercase %XX encoding of each of its UTF-8 bytes. If a rune cannot
// be encoded, the input is returned unchanged, as before.
func getURLEncodedPath(pathName string) string {
	// Fast path: nothing needs encoding.
	if reservedObjectNames.MatchString(pathName) {
		return pathName
	}
	// strings.Builder avoids the quadratic string concatenation of the
	// original implementation.
	var encoded strings.Builder
	for _, r := range pathName {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9',
			r == '-', r == '_', r == '.', r == '~', r == '/':
			encoded.WriteRune(r)
		default:
			// Renamed from "len", which shadowed the builtin.
			n := utf8.RuneLen(r)
			if n < 0 {
				return pathName
			}
			buf := make([]byte, n)
			utf8.EncodeRune(buf, r)
			for _, b := range buf {
				encoded.WriteString("%" + strings.ToUpper(hex.EncodeToString([]byte{b})))
			}
		}
	}
	return encoded.String()
}
// path2BucketAndObject splits a URL path of the form /bucket/object...
// (with any query string stripped) into its bucket and object parts.
// Missing components come back as "".
func path2BucketAndObject(path string) (bucketName, objectName string) {
	// Drop any query string before splitting on the path separator.
	trimmed := strings.SplitN(path, "?", 2)[0]
	parts := strings.SplitN(trimmed, separator, 3)
	if len(parts) > 1 {
		bucketName = parts[1]
	}
	if len(parts) > 2 {
		objectName = parts[2]
	}
	return bucketName, objectName
}
// path2Object returns only the object-name component of a URL path.
func path2Object(path string) (objectName string) {
	_, objectName = path2BucketAndObject(path)
	return objectName
}
// path2Bucket returns only the bucket-name component of a URL path.
func path2Bucket(path string) (bucketName string) {
	bucketName, _ = path2BucketAndObject(path)
	return bucketName
}
// path2Query returns the raw query portion of path (everything after the
// first '?'), or "" when there is none.
func path2Query(path string) (query string) {
	if _, q, found := strings.Cut(path, "?"); found {
		query = q
	}
	return query
}
// getRequestURL builds the full request URL from HTTPServer + HTTPPath,
// percent-encoding the object name and appending the query string last.
func (op *operation) getRequestURL(config Config) (url string) {
	url = op.HTTPServer + separator
	// Path-style requests carry the bucket in the URL path.
	if !config.isVirtualStyle {
		url += path2Bucket(op.HTTPPath)
	}
	objectName := getURLEncodedPath(path2Object(op.HTTPPath))
	queryPath := path2Query(op.HTTPPath)
	if objectName != "" {
		if !strings.HasSuffix(url, separator) {
			url += separator
		}
		url += objectName
	}
	if queryPath != "" {
		url += "?" + queryPath
	}
	return url
}
// newPresignedRequest instantiates a request configured for presigning,
// valid for the given expiry (seconds, as a string).
func newPresignedRequest(op *operation, config *Config, expires string) (*request, error) {
	// Default to POST when the operation does not specify a method.
	method := op.HTTPMethod
	if method == "" {
		method = "POST"
	}
	// Get a new HTTP request for the requested method.
	req, err := http.NewRequest(method, op.getRequestURL(*config), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", config.userAgent)
	// Honor the configured response encoding style, if any.
	if config.AcceptType != "" {
		req.Header.Set("Accept", config.AcceptType)
	}
	// Save for subsequent use.
	return &request{
		config:  config,
		expires: expires,
		req:     req,
		body:    nil,
	}, nil
}
// newUnauthenticatedRequest - instantiate a new unauthenticated request
func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) {
	// Default to POST when the operation does not specify a method.
	method := op.HTTPMethod
	if method == "" {
		method = "POST"
	}
	// Get a new HTTP request for the requested method.
	req, err := http.NewRequest(method, op.getRequestURL(*config), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", config.userAgent)
	// Honor the configured response encoding style, if any.
	if config.AcceptType != "" {
		req.Header.Set("Accept", config.AcceptType)
	}
	// Attach the body, if any (http.NewRequest left req.Body nil).
	if body != nil {
		req.Body = ioutil.NopCloser(body)
	}
	// Save for subsequent use. r.body stays nil: a plain io.Reader
	// cannot be rewound for payload hashing.
	return &request{req: req, config: config}, nil
}
// newRequest - instantiate a new request
func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) {
	// Default to POST when the operation does not specify a method.
	method := op.HTTPMethod
	if method == "" {
		method = "POST"
	}
	// Get a new HTTP request for the requested method.
	req, err := http.NewRequest(method, op.getRequestURL(*config), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", config.userAgent)
	// Honor the configured response encoding style, if any.
	if config.AcceptType != "" {
		req.Header.Set("Accept", config.AcceptType)
	}
	// Attach the body, if any (http.NewRequest left req.Body nil).
	if body != nil {
		req.Body = ioutil.NopCloser(body)
	}
	// Keep the seekable body so the payload can be hashed for signing.
	return &request{config: config, req: req, body: body}, nil
}
// Do executes the request, signing it first when both credentials are set.
func (r *request) Do() (resp *http.Response, err error) {
	if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" {
		r.SignV4()
	}
	transport := http.DefaultTransport
	if r.config.Transport != nil {
		transport = r.config.Transport
	}
	// Deliberately avoid http.Client{}: it follows redirects internally
	// with no easy configuration to disable that, which complicates
	// verifying subsequent errors. Calling RoundTrip directly hands
	// every reply, redirects included, straight back to the caller.
	return transport.RoundTrip(r.req)
}
// Set - set additional headers if any. Replaces any existing value for key
// on the underlying HTTP request.
func (r *request) Set(key, value string) {
	r.req.Header.Set(key, value)
}
// Get - get header values. Returns the first value set for key on the
// underlying HTTP request, or "" if unset.
func (r *request) Get(key string) string {
	return r.req.Header.Get(key)
}
// getHashedPayload returns the hex-encoded SHA256 of the request payload,
// or the sentinel "UNSIGNED-PAYLOAD" for presigned requests. Signed
// payload digests are also recorded in the X-Amz-Content-Sha256 header.
func (r *request) getHashedPayload() string {
	var hashedPayload string
	switch {
	case r.expires != "":
		// Presigned requests leave their payload unsigned.
		hashedPayload = "UNSIGNED-PAYLOAD"
	case r.body == nil:
		// SHA256 of the empty body.
		hashedPayload = hex.EncodeToString(sum256([]byte{}))
	default:
		sum256Bytes, _ := sum256Reader(r.body)
		hashedPayload = hex.EncodeToString(sum256Bytes)
	}
	if hashedPayload != "UNSIGNED-PAYLOAD" {
		r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload)
	}
	return hashedPayload
}
// getCanonicalHeaders generates the "name:value\n" list of signed request
// headers. "host" is always included and takes its value from the URL,
// since Go stores the host on the request rather than in the header map.
func (r *request) getCanonicalHeaders() string {
	vals := make(map[string][]string)
	headers := make([]string, 0, len(r.req.Header)+1)
	for k, vv := range r.req.Header {
		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
			continue // ignored header
		}
		lower := strings.ToLower(k)
		headers = append(headers, lower)
		vals[lower] = vv
	}
	headers = append(headers, "host")
	sort.Strings(headers)
	var buf bytes.Buffer
	for _, k := range headers {
		buf.WriteString(k)
		buf.WriteByte(':')
		if k == "host" {
			buf.WriteString(r.req.URL.Host)
		}
		// For "host" vals[k] is empty, so this loop is a no-op there.
		for idx, v := range vals[k] {
			if idx > 0 {
				buf.WriteByte(',')
			}
			buf.WriteString(v)
		}
		buf.WriteByte('\n')
	}
	return buf.String()
}
// getSignedHeaders generates the alphabetically sorted, semicolon-separated
// list of lowercase request header names covered by the signature.
func (r *request) getSignedHeaders() string {
	headers := []string{"host"}
	for k := range r.req.Header {
		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
			continue // ignored header
		}
		headers = append(headers, strings.ToLower(k))
	}
	sort.Strings(headers)
	return strings.Join(headers, ";")
}
// getCanonicalRequest generates a canonical request of style
//
//	<HTTPMethod>\n
//	<CanonicalURI>\n
//	<CanonicalQueryString>\n
//	<CanonicalHeaders>\n
//	<SignedHeaders>\n
//	<HashedPayload>
func (r *request) getCanonicalRequest(hashedPayload string) string {
	// AWS requires spaces in the query to be encoded as %20, never '+'.
	// Note this re-encodes (and thereby normalizes) RawQuery in place.
	r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1)
	parts := []string{
		r.req.Method,
		getURLEncodedPath(r.req.URL.Path),
		r.req.URL.RawQuery,
		r.getCanonicalHeaders(),
		r.getSignedHeaders(),
		hashedPayload,
	}
	return strings.Join(parts, "\n")
}
// getStringToSign builds the SigV4 string to sign: algorithm, timestamp,
// credential scope, and the SHA256 of the canonical request.
func (r *request) getStringToSign(canonicalRequest string, t time.Time) string {
	return strings.Join([]string{
		authHeader,
		t.Format(iso8601DateFormat),
		getScope(r.config.Region, t),
		hex.EncodeToString(sum256([]byte(canonicalRequest))),
	}, "\n")
}
// PreSignV4 presigns the request, in accordance with -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
func (r *request) PreSignV4() (string, error) {
	// Both credentials are required to produce a valid presigned URL.
	// The previous check used &&, which let a request with only one of
	// the two credentials slip through and emit a broken signature;
	// Do() likewise requires both before signing.
	if r.config.AccessKeyID == "" || r.config.SecretAccessKey == "" {
		return "", errors.New("presign requires accesskey and secretkey")
	}
	r.SignV4()
	return r.req.URL.String(), nil
}
// PostPresignSignature signs a base64-encoded POST policy document with
// the derived SigV4 key, for browser-based POST uploads.
func (r *request) PostPresignSignature(policyBase64 string, t time.Time) string {
	key := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
	return getSignature(key, policyBase64)
}
// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
//
// When r.expires is non-empty the request is being presigned: all signing
// parameters travel in X-Amz-* query values instead of headers, and the
// payload stays unsigned. Statement order matters: the query values must
// be encoded into RawQuery before the canonical request is computed.
func (r *request) SignV4() {
	query := r.req.URL.Query()
	if r.expires != "" {
		query.Set("X-Amz-Algorithm", authHeader)
	}
	t := time.Now().UTC()
	// Add date if not present
	if r.expires != "" {
		query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
		query.Set("X-Amz-Expires", r.expires)
	} else {
		r.Set("X-Amz-Date", t.Format(iso8601DateFormat))
	}
	// getHashedPayload also sets X-Amz-Content-Sha256 for signed payloads.
	hashedPayload := r.getHashedPayload()
	signedHeaders := r.getSignedHeaders()
	if r.expires != "" {
		query.Set("X-Amz-SignedHeaders", signedHeaders)
	}
	credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
	if r.expires != "" {
		query.Set("X-Amz-Credential", credential)
		r.req.URL.RawQuery = query.Encode()
	}
	canonicalRequest := r.getCanonicalRequest(hashedPayload)
	stringToSign := r.getStringToSign(canonicalRequest, t)
	signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
	signature := getSignature(signingKey, stringToSign)
	if r.expires != "" {
		// Appended verbatim so the signature itself is not re-encoded.
		r.req.URL.RawQuery += "&X-Amz-Signature=" + signature
	} else {
		// final Authorization header
		parts := []string{
			authHeader + " Credential=" + credential,
			"SignedHeaders=" + signedHeaders,
			"Signature=" + signature,
		}
		auth := strings.Join(parts, ", ")
		r.Set("Authorization", auth)
	}
}

View File

@ -1,74 +0,0 @@
package aws
import (
"time"
)
// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
	Total time.Duration // total duration of attempt.
	Delay time.Duration // interval between each try in the burst.
	Min   int           // minimum number of retries; overrides Total
}
// Attempt tracks the state of one retry sequence started by
// AttemptStrategy.Start.
type Attempt struct {
	strategy AttemptStrategy // the strategy being executed
	last     time.Time       // when the previous attempt ran
	end      time.Time       // deadline after which Next gives up
	force    bool            // guarantees the next call to Next succeeds
	count    int             // number of attempts made so far
}
// Start begins a new sequence of attempts for the given strategy.
// The first call to Next on the returned Attempt always succeeds.
func (s AttemptStrategy) Start() *Attempt {
	begin := time.Now()
	a := &Attempt{
		strategy: s,
		last:     begin,
		end:      begin.Add(s.Total),
		force:    true,
	}
	return a
}
// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
	now := time.Now()
	sleep := a.nextSleep(now)
	// Stop once the next attempt would overrun the deadline, unless a
	// success is forced or the minimum retry count is not yet reached.
	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
		return false
	}
	a.force = false
	// Never sleep before the very first attempt.
	if sleep > 0 && a.count > 0 {
		time.Sleep(sleep)
		now = time.Now()
	}
	a.count++
	a.last = now
	return true
}
// nextSleep returns how long to wait before the next attempt, never
// negative: the configured delay minus time already elapsed since the
// last attempt.
func (a *Attempt) nextSleep(now time.Time) time.Duration {
	if remaining := a.strategy.Delay - now.Sub(a.last); remaining > 0 {
		return remaining
	}
	return 0
}
// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
	if a.force || a.strategy.Min > a.count {
		return true
	}
	now := time.Now()
	// If there is still time for one more try, latch force so the
	// guarantee documented above holds.
	if now.Add(a.nextSleep(now)).Before(a.end) {
		a.force = true
		return true
	}
	return false
}

View File

@ -1,59 +0,0 @@
package aws_test
import (
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
// TestAttemptTiming verifies that attempts fire at (roughly) the delays
// implied by the strategy's Total and Delay settings.
func (S) TestAttemptTiming(c *C) {
	testAttempt := aws.AttemptStrategy{
		Total: 0.25e9,
		Delay: 0.1e9,
	}
	want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
	got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
	t0 := time.Now()
	for a := testAttempt.Start(); a.Next(); {
		got = append(got, time.Now().Sub(t0))
	}
	got = append(got, time.Now().Sub(t0))
	c.Assert(got, HasLen, len(want))
	const margin = 0.01e9
	// BUG FIX: the loop previously ranged over `want`, shadowing `got`,
	// so it compared the expected values against themselves and could
	// never fail. Compare the measured durations instead.
	for i := range want {
		lo := want[i] - margin
		hi := want[i] + margin
		if got[i] < lo || got[i] > hi {
			c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got[i].Seconds())
		}
	}
}
// TestAttemptNextHasNext exercises the interplay of Next and HasNext
// across zero-duration, timed, and minimum-retry strategies.
func (S) TestAttemptNextHasNext(c *C) {
	a := aws.AttemptStrategy{}.Start()
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.Next(), Equals, false)
	a = aws.AttemptStrategy{}.Start()
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.HasNext(), Equals, false)
	c.Assert(a.Next(), Equals, false)
	a = aws.AttemptStrategy{Total: 2e8}.Start()
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.HasNext(), Equals, true)
	time.Sleep(2e8)
	// Still true: the earlier HasNext latched a forced success.
	c.Assert(a.HasNext(), Equals, true)
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.Next(), Equals, false)
	a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
	time.Sleep(1e8)
	// Min overrides the expired Total: two attempts are still allowed.
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.HasNext(), Equals, true)
	c.Assert(a.Next(), Equals, true)
	c.Assert(a.HasNext(), Equals, false)
	c.Assert(a.Next(), Equals, false)
}

View File

@ -1,268 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package aws
import (
"errors"
"os"
"strings"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
	Name                 string // the canonical name of this region.
	EC2Endpoint          string
	S3Endpoint           string
	S3BucketEndpoint     string // Not needed by AWS S3. Use ${bucket} for bucket name.
	S3LocationConstraint bool   // true if this region requires a LocationConstraint declaration.
	S3LowercaseBucket    bool   // true if the region requires bucket names to be lower case.
	SDBEndpoint          string // not all regions have SimpleDB, e.g. Frankfurt (eu-central-1) does not
	SNSEndpoint          string
	SQSEndpoint          string
	IAMEndpoint          string
}
// ResolveS3BucketEndpoint returns the lowercased endpoint URL for the
// given bucket, expanding the ${bucket} placeholder when the region
// supplies an explicit S3BucketEndpoint template.
func (r Region) ResolveS3BucketEndpoint(bucketName string) string {
	endpoint := r.S3Endpoint + "/" + bucketName + "/"
	if r.S3BucketEndpoint != "" {
		endpoint = strings.Replace(r.S3BucketEndpoint, "${bucket}", bucketName, -1)
	}
	return strings.ToLower(endpoint)
}
// The region definitions below use keyed composite literals: the former
// positional literals silently mis-assign endpoints if Region's field
// layout ever changes, and keyed fields let zero values be omitted.

// USEast is US East (N. Virginia).
var USEast = Region{
	Name:        "us-east-1",
	EC2Endpoint: "https://ec2.us-east-1.amazonaws.com",
	S3Endpoint:  "https://s3.amazonaws.com",
	SDBEndpoint: "https://sdb.amazonaws.com",
	SNSEndpoint: "https://sns.us-east-1.amazonaws.com",
	SQSEndpoint: "https://sqs.us-east-1.amazonaws.com",
	IAMEndpoint: "https://iam.amazonaws.com",
}

// USWest is US West (N. California).
var USWest = Region{
	Name:                 "us-west-1",
	EC2Endpoint:          "https://ec2.us-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-us-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.us-west-1.amazonaws.com",
	SNSEndpoint:          "https://sns.us-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// USWest2 is US West (Oregon).
var USWest2 = Region{
	Name:                 "us-west-2",
	EC2Endpoint:          "https://ec2.us-west-2.amazonaws.com",
	S3Endpoint:           "https://s3-us-west-2.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.us-west-2.amazonaws.com",
	SNSEndpoint:          "https://sns.us-west-2.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-west-2.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// USGovWest is the isolated AWS GovCloud (US) region; it has no SimpleDB.
var USGovWest = Region{
	Name:                 "us-gov-west-1",
	EC2Endpoint:          "https://ec2.us-gov-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-us-gov-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SNSEndpoint:          "https://sns.us-gov-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-gov-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.us-gov.amazonaws.com",
}

// EUWest is EU (Ireland).
var EUWest = Region{
	Name:                 "eu-west-1",
	EC2Endpoint:          "https://ec2.eu-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-eu-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.eu-west-1.amazonaws.com",
	SNSEndpoint:          "https://sns.eu-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.eu-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// EUCentral is EU (Frankfurt); it has no SimpleDB.
var EUCentral = Region{
	Name:                 "eu-central-1",
	EC2Endpoint:          "https://ec2.eu-central-1.amazonaws.com",
	S3Endpoint:           "https://s3-eu-central-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SNSEndpoint:          "https://sns.eu-central-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.eu-central-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// APSoutheast is Asia Pacific (Singapore).
var APSoutheast = Region{
	Name:                 "ap-southeast-1",
	EC2Endpoint:          "https://ec2.ap-southeast-1.amazonaws.com",
	S3Endpoint:           "https://s3-ap-southeast-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-southeast-1.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-southeast-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-southeast-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// APSoutheast2 is Asia Pacific (Sydney).
var APSoutheast2 = Region{
	Name:                 "ap-southeast-2",
	EC2Endpoint:          "https://ec2.ap-southeast-2.amazonaws.com",
	S3Endpoint:           "https://s3-ap-southeast-2.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-southeast-2.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-southeast-2.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-southeast-2.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// APNortheast is Asia Pacific (Tokyo).
var APNortheast = Region{
	Name:                 "ap-northeast-1",
	EC2Endpoint:          "https://ec2.ap-northeast-1.amazonaws.com",
	S3Endpoint:           "https://s3-ap-northeast-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-northeast-1.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-northeast-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-northeast-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// SAEast is South America (Sao Paulo).
var SAEast = Region{
	Name:                 "sa-east-1",
	EC2Endpoint:          "https://ec2.sa-east-1.amazonaws.com",
	S3Endpoint:           "https://s3-sa-east-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.sa-east-1.amazonaws.com",
	SNSEndpoint:          "https://sns.sa-east-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.sa-east-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
}

// CNNorth is the isolated China (Beijing) region.
var CNNorth = Region{
	Name:                 "cn-north-1",
	EC2Endpoint:          "https://ec2.cn-north-1.amazonaws.com.cn",
	S3Endpoint:           "https://s3.cn-north-1.amazonaws.com.cn",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.cn-north-1.amazonaws.com.cn",
	SNSEndpoint:          "https://sns.cn-north-1.amazonaws.com.cn",
	SQSEndpoint:          "https://sqs.cn-north-1.amazonaws.com.cn",
	IAMEndpoint:          "https://iam.cn-north-1.amazonaws.com.cn",
}

// Regions maps every canonical region name to its Region definition.
var Regions = map[string]Region{
	APNortheast.Name:  APNortheast,
	APSoutheast.Name:  APSoutheast,
	APSoutheast2.Name: APSoutheast2,
	EUWest.Name:       EUWest,
	EUCentral.Name:    EUCentral,
	USEast.Name:       USEast,
	USWest.Name:       USWest,
	USWest2.Name:      USWest2,
	USGovWest.Name:    USGovWest,
	SAEast.Name:       SAEast,
	CNNorth.Name:      CNNorth,
}
// Auth holds a set of AWS credentials.
type Auth struct {
	AccessKey, SecretKey string
}
// unreserved marks which ASCII bytes may appear unescaped in an AWS
// signature (the RFC 3986 unreserved set); populated in init.
var unreserved = make([]bool, 128)

// hex holds the digits used when percent-encoding a byte.
var hex = "0123456789ABCDEF"

func init() {
	// RFC3986
	// Note: '0' appears twice in this set; the duplicate is harmless.
	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
	for _, c := range u {
		unreserved[c] = true
	}
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used as the first preference, but EC2_ACCESS_KEY
// and EC2_SECRET_KEY or AWS_ACCESS_KEY and AWS_SECRET_KEY
// environment variables are also supported.
func EnvAuth() (auth Auth, err error) {
	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
	// first fallback: the EC2_ environment variables
	if auth.AccessKey == "" && auth.SecretKey == "" {
		auth.AccessKey = os.Getenv("EC2_ACCESS_KEY")
		auth.SecretKey = os.Getenv("EC2_SECRET_KEY")
	}
	// second fallback: the legacy AWS_ environment variables
	if auth.AccessKey == "" && auth.SecretKey == "" {
		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
	}
	if auth.AccessKey == "" {
		err = errors.New("AWS_ACCESS_KEY_ID not found in environment")
	}
	if auth.SecretKey == "" {
		err = errors.New("AWS_SECRET_ACCESS_KEY not found in environment")
	}
	return
}
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
	// First pass: most inputs need no escaping at all.
	escapeNeeded := false
	for i := 0; i != len(s); i++ {
		if c := s[i]; c > 127 || !unreserved[c] {
			escapeNeeded = true
			break
		}
	}
	if !escapeNeeded {
		return s
	}
	// Second pass: percent-encode every byte outside the unreserved set.
	out := make([]byte, 0, len(s)*3)
	for i := 0; i != len(s); i++ {
		c := s[i]
		if c > 127 || !unreserved[c] {
			out = append(out, '%', hex[c>>4], hex[c&0xF])
		} else {
			out = append(out, c)
		}
	}
	return string(out)
}

View File

@ -1,84 +0,0 @@
package aws_test
import (
"os"
"strings"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) {
	TestingT(t)
}

var _ = Suite(&S{})

// S is the gocheck suite; it snapshots the process environment so the
// tests can mutate it freely.
type S struct {
	environ []string
}

// SetUpSuite records the original environment once for the whole suite.
func (s *S) SetUpSuite(c *C) {
	s.environ = os.Environ()
}

// TearDownTest restores the environment captured in SetUpSuite.
func (s *S) TearDownTest(c *C) {
	os.Clearenv()
	for _, kv := range s.environ {
		l := strings.SplitN(kv, "=", 2)
		os.Setenv(l[0], l[1])
	}
}
// TestEnvAuthNoSecret ensures EnvAuth fails in an empty environment.
func (s *S) TestEnvAuthNoSecret(c *C) {
	os.Clearenv()
	_, err := aws.EnvAuth()
	c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in environment")
}

// TestEnvAuthNoAccess ensures a secret key alone is not sufficient.
func (s *S) TestEnvAuthNoAccess(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
	_, err := aws.EnvAuth()
	c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID not found in environment")
}

// TestEnvAuth reads the preferred AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY.
func (s *S) TestEnvAuth(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, IsNil)
	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

// TestEnvAuthLegacy covers the EC2_* fallback variables.
func (s *S) TestEnvAuthLegacy(c *C) {
	os.Clearenv()
	os.Setenv("EC2_SECRET_KEY", "secret")
	os.Setenv("EC2_ACCESS_KEY", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, IsNil)
	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

// TestEnvAuthAws covers the AWS_ACCESS_KEY/AWS_SECRET_KEY fallback.
func (s *S) TestEnvAuthAws(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, IsNil)
	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

// TestEncode spot-checks AWS URI encoding of safe and reserved bytes.
func (s *S) TestEncode(c *C) {
	c.Assert(aws.Encode("foo"), Equals, "foo")
	c.Assert(aws.Encode("/"), Equals, "%2F")
}

// TestRegionsAreNamed verifies the Regions map keys match Region.Name.
func (s *S) TestRegionsAreNamed(c *C) {
	for n, r := range aws.Regions {
		c.Assert(n, Equals, r.Name)
	}
}

View File

@ -1,447 +0,0 @@
package aws
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
// debug is a package-level logger discarded by default; uncomment the
// os.Stdout argument below to see signing diagnostics.
var debug = log.New(
	// Remove the c-style comment header to front of line to debug information.
	/*os.Stdout, //*/ ioutil.Discard,
	"DEBUG: ",
	log.LstdFlags,
)
// Signer signs an HTTP request in place using the given credentials.
type Signer func(*http.Request, Auth) error

// Ensure our signers meet the interface
var _ Signer = SignV2
var _ Signer = SignV4Factory("", "")

// hasher reduces a payload stream to a (hex-encoded) digest string.
type hasher func(io.Reader) (string, error)

const (
	// ISO8601BasicFormat is the timestamp layout required by SigV4.
	ISO8601BasicFormat = "20060102T150405Z"
	// ISO8601BasicFormatShort is the date-only layout used in scopes.
	ISO8601BasicFormatShort = "20060102"
)
// SignV2 signs an HTTP request utilizing version 2 of the AWS
// signature, and the given credentials. The signature is added to the
// request's query string.
func SignV2(req *http.Request, auth Auth) (err error) {
	queryVals := req.URL.Query()
	queryVals.Set("AWSAccessKeyId", auth.AccessKey)
	queryVals.Set("SignatureVersion", "2")
	queryVals.Set("SignatureMethod", "HmacSHA256")
	uriStr := canonicalURI(req.URL)
	queryStr := canonicalQueryString(queryVals)
	// String to sign: METHOD\nHOST\nURI\nQUERY.
	payload := new(bytes.Buffer)
	if err := errorCollector(
		fprintfWrapper(payload, "%s\n", requestMethodVerb(req.Method)),
		fprintfWrapper(payload, "%s\n", req.Host),
		fprintfWrapper(payload, "%s\n", uriStr),
		fprintfWrapper(payload, "%s", queryStr),
	); err != nil {
		return err
	}
	// HMAC-SHA256 over the payload, base64-encoded into the Signature
	// query parameter.
	hash := hmac.New(sha256.New, []byte(auth.SecretKey))
	hash.Write(payload.Bytes())
	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
	base64.StdEncoding.Encode(signature, hash.Sum(nil))
	queryVals.Set("Signature", string(signature))
	req.URL.RawQuery = queryVals.Encode()
	return nil
}
// SignV4Factory returns a version 4 Signer which will utilize the
// given region name.
func SignV4Factory(regionName, serviceName string) Signer {
	signer := func(req *http.Request, auth Auth) error {
		return SignV4(req, auth, regionName, serviceName)
	}
	return signer
}
// SignV4URL creates a presigned (query-string authenticated) URL valid
// for the given expiry, in accordance with version 4 of the AWS
// signature. Only the "host" header is signed and the payload is left
// unsigned. The request must already carry a parseable date header.
func SignV4URL(req *http.Request, auth Auth, regionName, svcName string, expires time.Duration) error {
	reqTime, err := requestTime(req)
	if err != nil {
		return err
	}
	// The date travels in X-Amz-Date rather than the date header.
	req.Header.Del("date")
	credScope := credentialScope(reqTime, regionName, svcName)
	queryVals := req.URL.Query()
	queryVals.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
	queryVals.Set("X-Amz-Credential", auth.AccessKey+"/"+credScope)
	queryVals.Set("X-Amz-Date", reqTime.Format(ISO8601BasicFormat))
	queryVals.Set("X-Amz-Expires", fmt.Sprintf("%d", int(expires.Seconds())))
	queryVals.Set("X-Amz-SignedHeaders", "host")
	req.URL.RawQuery = queryVals.Encode()
	// false: presigned URLs use the UNSIGNED-PAYLOAD sentinel hash.
	_, canonReqHash, _, err := canonicalRequest(req, sha256Hasher, false)
	if err != nil {
		return err
	}
	var strToSign string
	if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
		return err
	}
	key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
	signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))
	debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)
	queryVals.Set("X-Amz-Signature", signature)
	req.URL.RawQuery = queryVals.Encode()
	return nil
}
// SignV4 signs an HTTP request utilizing version 4 of the AWS
// signature, and the given credentials. The request must already carry
// a parseable date (x-amz-date or date header); the result is placed in
// the Authorization header.
func SignV4(req *http.Request, auth Auth, regionName, svcName string) (err error) {
	var reqTime time.Time
	if reqTime, err = requestTime(req); err != nil {
		return err
	}
	// Remove any existing authorization headers as they will corrupt
	// the signing.
	delete(req.Header, "Authorization")
	delete(req.Header, "authorization")
	credScope := credentialScope(reqTime, regionName, svcName)
	// true: the payload hash is computed and signed (header-based auth).
	_, canonReqHash, sortedHdrNames, err := canonicalRequest(req, sha256Hasher, true)
	if err != nil {
		return err
	}
	var strToSign string
	if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
		return err
	}
	key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
	signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))
	debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)
	var authHdrVal string
	if authHdrVal, err = authHeaderString(
		req.Header,
		auth.AccessKey,
		signature,
		credScope,
		sortedHdrNames,
	); err != nil {
		return err
	}
	req.Header.Set("Authorization", authHdrVal)
	return nil
}
// Task 1: Create a Canonical Request.
// Returns the canonical request, and its hash.
//
// When calcPayHash is true the payload hash is computed (consuming
// req.Body) and recorded in the x-amz-content-sha256 header; otherwise
// the UNSIGNED-PAYLOAD sentinel is used (presigned URLs).
func canonicalRequest(
	req *http.Request,
	hasher hasher,
	calcPayHash bool,
) (canReq, canReqHash string, sortedHdrNames []string, err error) {
	payHash := "UNSIGNED-PAYLOAD"
	if calcPayHash {
		if payHash, err = payloadHash(req, hasher); err != nil {
			return
		}
		req.Header.Set("x-amz-content-sha256", payHash)
	}
	// "host" is signed even though Go keeps it on req.Host, not in the
	// header map, so it is injected explicitly.
	sortedHdrNames = sortHeaderNames(req.Header, "host")
	var canHdr string
	if canHdr, err = canonicalHeaders(sortedHdrNames, req.Host, req.Header); err != nil {
		return
	}
	debug.Printf("canHdr:\n\"\"\"\n%s\n\"\"\"", canHdr)
	debug.Printf("signedHeader: %s\n\n", strings.Join(sortedHdrNames, ";"))
	uriStr := canonicalURI(req.URL)
	queryStr := canonicalQueryString(req.URL.Query())
	c := new(bytes.Buffer)
	if err := errorCollector(
		fprintfWrapper(c, "%s\n", requestMethodVerb(req.Method)),
		fprintfWrapper(c, "%s\n", uriStr),
		fprintfWrapper(c, "%s\n", queryStr),
		fprintfWrapper(c, "%s\n", canHdr),
		fprintfWrapper(c, "%s\n", strings.Join(sortedHdrNames, ";")),
		fprintfWrapper(c, "%s", payHash),
	); err != nil {
		return "", "", nil, err
	}
	canReq = c.String()
	debug.Printf("canReq:\n\"\"\"\n%s\n\"\"\"", canReq)
	canReqHash, err = hasher(bytes.NewBuffer([]byte(canReq)))
	return canReq, canReqHash, sortedHdrNames, err
}
// Task 2: Create a string to Sign
// Returns a string in the defined format to sign for the authorization header.
func stringToSign(
t time.Time,
canonReqHash string,
credScope string,
) (string, error) {
w := new(bytes.Buffer)
if err := errorCollector(
fprintfWrapper(w, "AWS4-HMAC-SHA256\n"),
fprintfWrapper(w, "%s\n", t.Format(ISO8601BasicFormat)),
fprintfWrapper(w, "%s\n", credScope),
fprintfWrapper(w, "%s", canonReqHash),
); err != nil {
return "", err
}
return w.String(), nil
}
// Task 3: Calculate the Signature
// Returns a derived signing key.
func signingKey(t time.Time, secretKey, regionName, svcName string) []byte {
kSecret := secretKey
kDate := hmacHasher([]byte("AWS4"+kSecret), t.Format(ISO8601BasicFormatShort))
kRegion := hmacHasher(kDate, regionName)
kService := hmacHasher(kRegion, svcName)
kSigning := hmacHasher(kService, "aws4_request")
return kSigning
}
// Task 4: Add the Signing Information to the Request
// Returns a string to be placed in the Authorization header for the request.
// The header parameter is currently unused but kept for interface stability.
func authHeaderString(
	header http.Header,
	accessKey,
	signature string,
	credScope string,
	sortedHeaderNames []string,
) (string, error) {
	buf := new(bytes.Buffer)
	_, err := fmt.Fprintf(buf,
		"AWS4-HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		accessKey, credScope, strings.Join(sortedHeaderNames, ";"), signature)
	if err != nil {
		return "", err
	}
	return buf.String(), nil
}
// canonicalURI returns the URI-encoded request path for the canonical
// request, encoding each segment independently.
func canonicalURI(u *url.URL) string {
	// The algorithm states that if the path is empty, to just use a "/".
	if u.Path == "" {
		return "/"
	}
	// Each path segment must be URI-encoded.
	segments := strings.Split(u.Path, "/")
	encoded := make([]string, len(segments))
	for i, segment := range segments {
		encoded[i] = goToAwsUrlEncoding(url.QueryEscape(segment))
	}
	return strings.Join(encoded, "/")
}
// canonicalQueryString renders queryVals in AWS canonical form. Note the
// value slices are sorted in place, mutating the caller's url.Values.
func canonicalQueryString(queryVals url.Values) string {
	// AWS dictates that if duplicate keys exist, their values be
	// sorted as well.
	for key := range queryVals {
		sort.Strings(queryVals[key])
	}
	return goToAwsUrlEncoding(queryVals.Encode())
}
// goToAwsUrlEncoding converts Go's form encoding to AWS's: spaces must
// appear as %20, never '+'. Any significant '+' characters are assumed
// to already be hex-escaped before this replacement runs.
func goToAwsUrlEncoding(urlEncoded string) string {
	return strings.Join(strings.Split(urlEncoded, "+"), "%20")
}
func canonicalHeaders(sortedHeaderNames []string, host string, hdr http.Header) (string, error) {
buffer := new(bytes.Buffer)
for _, hName := range sortedHeaderNames {
hdrVals := host
if hName != "host" {
canonHdrKey := http.CanonicalHeaderKey(hName)
sortedHdrVals := hdr[canonHdrKey]
sort.Strings(sortedHdrVals)
hdrVals = strings.Join(sortedHdrVals, ",")
}
if _, err := fmt.Fprintf(buffer, "%s:%s\n", hName, hdrVals); err != nil {
return "", err
}
}
// There is intentionally a hanging newline at the end of the
// header list.
return buffer.String(), nil
}
// Returns a SHA256 checksum of the request body. Represented as a
// lowercase hexadecimal string. An absent body hashes like an empty one.
func payloadHash(req *http.Request, hasher hasher) (string, error) {
	var body io.Reader = req.Body
	if req.Body == nil {
		body = bytes.NewBuffer(nil)
	}
	return hasher(body)
}
// Retrieve the header names, lower-case them, and sort them.
func sortHeaderNames(header http.Header, injectedNames ...string) []string {
sortedNames := injectedNames
for hName, _ := range header {
sortedNames = append(sortedNames, strings.ToLower(hName))
}
sort.Strings(sortedNames)
return sortedNames
}
// hmacHasher computes the HMAC-SHA256 of value under key.
func hmacHasher(key []byte, value string) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(value))
	return mac.Sum(nil)
}
func sha256Hasher(payloadReader io.Reader) (string, error) {
hasher := sha256.New()
_, err := io.Copy(hasher, payloadReader)
return fmt.Sprintf("%x", hasher.Sum(nil)), err
}
func credentialScope(t time.Time, regionName, svcName string) string {
return fmt.Sprintf(
"%s/%s/%s/aws4_request",
t.Format(ISO8601BasicFormatShort),
regionName,
svcName,
)
}
// We do a lot of fmt.Fprintfs in this package. Create a higher-order
// function to elide the bytes written return value so we can submit
// these calls to an error collector.
func fprintfWrapper(w io.Writer, format string, vals ...interface{}) func() error {
return func() error {
_, err := fmt.Fprintf(w, format, vals...)
return err
}
}
// errorCollector runs each writer in order, stopping at and returning
// the first error encountered. (Poor man's maybe monad.)
func errorCollector(writers ...func() error) error {
	var err error
	for _, writer := range writers {
		if err = writer(); err != nil {
			break
		}
	}
	return err
}
// Retrieve the request time from the request. We will attempt to
// parse whatever we find, but we will not make up a request date for
// the user (i.e.: Magic!). The x-amz-date header takes precedence over
// the date header.
func requestTime(req *http.Request) (time.Time, error) {
	// Time formats to try. We want to do everything we can to accept
	// all time formats, but ultimately we may fail. In the package
	// scope so it doesn't get initialized for every request.
	var timeFormats = []string{
		time.RFC822,
		ISO8601BasicFormat,
		time.RFC1123,
		time.ANSIC,
		time.UnixDate,
		time.RubyDate,
		time.RFC822Z,
		time.RFC850,
		time.RFC1123Z,
		time.RFC3339,
		time.RFC3339Nano,
		time.Kitchen,
	}
	// Get a date header.
	var date string
	if date = req.Header.Get("x-amz-date"); date == "" {
		if date = req.Header.Get("date"); date == "" {
			return time.Time{}, fmt.Errorf(`Could not retrieve a request date. Please provide one in either "x-amz-date", or "date".`)
		}
	}
	// Start attempting to parse; the first layout that succeeds wins.
	for _, format := range timeFormats {
		if parsedTime, err := time.Parse(format, date); err == nil {
			return parsedTime, nil
		}
	}
	return time.Time{}, fmt.Errorf(
		"Could not parse the given date. Please utilize one of the following formats: %s",
		strings.Join(timeFormats, ","),
	)
}
// http.Request's Method member returns the entire method. Derive the
// verb. Per the net/http docs, an empty Method means GET.
func requestMethodVerb(rawMethod string) (verb string) {
	// BUG FIX: strings.SplitN never returns an empty slice, so the old
	// `len(verbPlus) == 0` branch was unreachable and an empty method
	// incorrectly produced "" instead of the documented "GET".
	if rawMethod == "" {
		return "GET"
	}
	return strings.SplitN(rawMethod, " ", 2)[0]
}

View File

@ -1,285 +0,0 @@
package aws
import (
"bytes"
"fmt"
"net/http"
"time"
. "gopkg.in/check.v1"
)
// Register the suite with gocheck.
var _ = Suite(&SigningSuite{})

// SigningSuite groups the V2/V4 request-signing tests.
type SigningSuite struct{}

// TODO(katco-): The signing methodology is a "one size fits all"
// approach. The hashes we check against don't include headers that
// are added in as requisite parts for S3. That doesn't mean the tests
// are invalid, or that signing is broken for these examples, but as
// long as we're adding heads in, it's impossible to know what the new
// signature should be. We should revaluate these later. See:
// https://github.com/go-amz/amz/issues/29
const v4skipReason = `Extra headers present - cannot predict generated signature (issue #29).`

// EC2 ReST authentication docs: http://goo.gl/fQmAN
// Deliberately dummy credentials; only the signing output matters.
var testAuth = Auth{"user", "secret"}
// TestV4SignedUrl checks presigned-URL generation against the worked
// example from the AWS Signature Version 4 documentation.
func (s *SigningSuite) TestV4SignedUrl(c *C) {
	auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
	req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
	c.Assert(err, IsNil)
	err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
	c.Assert(err, IsNil)
	c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404&X-Amz-SignedHeaders=host")
}

// TestV4SignedUrlReserved exercises presigning for a path containing
// RFC 3986 reserved characters (":" and ",").
func (s *SigningSuite) TestV4SignedUrlReserved(c *C) {
	auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some:reserved,characters", nil)
	req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
	c.Assert(err, IsNil)
	err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
	c.Assert(err, IsNil)
	c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/some:reserved,characters?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=ac81e03593d6fc22ac045b9353b0242da755be2af80b981eb13917d8b9cf20a4&X-Amz-SignedHeaders=host")
}

// TestV4StringToSign checks the V4 string-to-sign layout. Note the
// local `stringToSign` result deliberately shadows the function name.
func (s *SigningSuite) TestV4StringToSign(c *C) {
	mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
	c.Assert(err, IsNil)
	stringToSign, err := stringToSign(
		mockTime,
		"3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2",
		"20110909/us-east-1/iam/aws4_request",
	)
	c.Assert(err, IsNil)
	const expected = `AWS4-HMAC-SHA256
20110909T233600Z
20110909/us-east-1/iam/aws4_request
3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2`
	c.Assert(stringToSign, Equals, expected)
}

// TestV4CanonicalRequest checks canonical-request construction.
// Skipped: see v4skipReason.
func (s *SigningSuite) TestV4CanonicalRequest(c *C) {
	c.Skip(v4skipReason)
	body := new(bytes.Buffer)
	_, err := fmt.Fprint(body, "Action=ListUsers&Version=2010-05-08")
	c.Assert(err, IsNil)
	req, err := http.NewRequest("POST", "https://iam.amazonaws.com", body)
	c.Assert(err, IsNil)
	req.Header.Add("content-type", "application/x-www-form-urlencoded; charset=utf-8")
	req.Header.Add("host", req.URL.Host)
	req.Header.Add("x-amz-date", "20110909T233600Z")
	canonReq, canonReqHash, _, err := canonicalRequest(
		req,
		sha256Hasher,
		true,
	)
	c.Assert(err, IsNil)
	const expected = `POST
/
content-type:application/x-www-form-urlencoded; charset=utf-8
host:iam.amazonaws.com
x-amz-date:20110909T233600Z
content-type;host;x-amz-date
b6359072c78d70ebee1e81adcbab4f01bf2c23245fa365ef83fe8f1f955085e2`
	c.Assert(canonReq, Equals, expected)
	c.Assert(canonReqHash, Equals, "3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2")
}

// TestV4SigningKey checks key derivation. Skipped: see v4skipReason.
func (s *SigningSuite) TestV4SigningKey(c *C) {
	c.Skip(v4skipReason)
	mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
	c.Assert(err, IsNil)
	c.Assert(
		fmt.Sprintf("%v", signingKey(mockTime, testAuth.SecretKey, USEast.Name, "iam")),
		Equals,
		"[152 241 216 137 254 196 244 66 26 220 82 43 171 12 225 248 46 105 41 194 98 237 21 229 169 76 144 239 209 227 176 231]")
}

// TestV4BasicSignatureV4 checks a full V4 Authorization header.
// Skipped: see v4skipReason.
func (s *SigningSuite) TestV4BasicSignatureV4(c *C) {
	c.Skip(v4skipReason)
	body := new(bytes.Buffer)
	req, err := http.NewRequest("POST / http/1.1", "https://host.foo.com", body)
	c.Assert(err, IsNil)
	req.Header.Add("Host", req.URL.Host)
	req.Header.Add("Date", "Mon, 09 Sep 2011 23:36:00 GMT")
	testAuth := Auth{
		AccessKey: "AKIDEXAMPLE",
		SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
	}
	err = SignV4(req, testAuth, USEast.Name, "host")
	c.Assert(err, IsNil)
	c.Assert(req.Header.Get("Authorization"), Equals, `AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request,SignedHeaders=date;host,Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726`)
}

// TestV4MoreCompleteSignature checks the V4 header signature for the
// S3 GET-with-Range example from the AWS documentation.
func (s *SigningSuite) TestV4MoreCompleteSignature(c *C) {
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
	c.Assert(err, IsNil)
	req.Header.Set("x-amz-date", "20130524T000000Z")
	req.Header.Set("Range", "bytes=0-9")
	testAuth := Auth{
		AccessKey: "AKIAIOSFODNN7EXAMPLE",
		SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
	}
	err = SignV4(req, testAuth, USEast.Name, "s3")
	c.Assert(err, IsNil)
	c.Check(req.Header.Get("Authorization"), Equals, "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41")
}

//
// v2 Tests
//

// TestV2BasicSignature signs a bare GET and checks the query fields.
func (s *SigningSuite) TestV2BasicSignature(c *C) {
	req, err := http.NewRequest("GET", "http://localhost/path", nil)
	c.Assert(err, IsNil)
	SignV2(req, testAuth)
	query := req.URL.Query()
	c.Assert(query.Get("SignatureVersion"), Equals, "2")
	c.Assert(query.Get("SignatureMethod"), Equals, "HmacSHA256")
	expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4="
	c.Assert(query.Get("Signature"), Equals, expected)
}

// TestV2ParamSignature checks signing over extra query parameters.
func (s *SigningSuite) TestV2ParamSignature(c *C) {
	req, err := http.NewRequest("GET", "http://localhost/path", nil)
	c.Assert(err, IsNil)
	query := req.URL.Query()
	for i := 1; i <= 3; i++ {
		query.Add(fmt.Sprintf("param%d", i), fmt.Sprintf("value%d", i))
	}
	req.URL.RawQuery = query.Encode()
	SignV2(req, testAuth)
	expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU="
	c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}

// TestV2ManyParams checks that parameter ordering is canonicalized.
func (s *SigningSuite) TestV2ManyParams(c *C) {
	req, err := http.NewRequest("GET", "http://localhost/path", nil)
	c.Assert(err, IsNil)
	query := req.URL.Query()
	orderedVals := []int{10, 2, 3, 4, 5, 6, 7, 8, 9, 1}
	for i, val := range orderedVals {
		query.Add(fmt.Sprintf("param%d", i+1), fmt.Sprintf("value%d", val))
	}
	req.URL.RawQuery = query.Encode()
	SignV2(req, testAuth)
	expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg="
	c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}

// TestV2Escaping checks that signing preserves characters needing
// URL escaping ("+" and space).
func (s *SigningSuite) TestV2Escaping(c *C) {
	req, err := http.NewRequest("GET", "http://localhost/path", nil)
	c.Assert(err, IsNil)
	query := req.URL.Query()
	query.Add("Nonce", "+ +")
	req.URL.RawQuery = query.Encode()
	err = SignV2(req, testAuth)
	c.Assert(err, IsNil)
	query = req.URL.Query()
	c.Assert(query.Get("Nonce"), Equals, "+ +")
	expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA="
	c.Assert(query.Get("Signature"), Equals, expected)
}

// TestV2SignatureExample1 signs a SimpleDB ListDomains request.
func (s *SigningSuite) TestV2SignatureExample1(c *C) {
	req, err := http.NewRequest("GET", "http://sdb.amazonaws.com/", nil)
	c.Assert(err, IsNil)
	query := req.URL.Query()
	query.Add("Timestamp", "2009-02-01T12:53:20+00:00")
	query.Add("Version", "2007-11-07")
	query.Add("Action", "ListDomains")
	req.URL.RawQuery = query.Encode()
	SignV2(req, Auth{"access", "secret"})
	expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ="
	c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}

// Tests example from:
// http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
// Specifically, good for testing case when URL does not contain a /
func (s *SigningSuite) TestV2SignatureTutorialExample(c *C) {
	req, err := http.NewRequest("GET", "https://elasticmapreduce.amazonaws.com/", nil)
	c.Assert(err, IsNil)
	query := req.URL.Query()
	query.Add("Timestamp", "2011-10-03T15:19:30")
	query.Add("Version", "2009-03-31")
	query.Add("Action", "DescribeJobFlows")
	req.URL.RawQuery = query.Encode()
	testAuth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
	err = SignV2(req, testAuth)
	c.Assert(err, IsNil)
	c.Assert(req.URL.Query().Get("Signature"), Equals, "i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=")
}

// https://bugs.launchpad.net/goamz/+bug/1022749
func (s *SigningSuite) TestSignatureWithEndpointPath(c *C) {
	req, err := http.NewRequest("GET", "http://localhost:4444/services/Cloud", nil)
	c.Assert(err, IsNil)
	queryStr := req.URL.Query()
	queryStr.Add("Action", "RebootInstances")
	queryStr.Add("Version", "2011-12-15")
	queryStr.Add("InstanceId.1", "i-10a64379")
	queryStr.Add("Timestamp", time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).In(time.UTC).Format(time.RFC3339))
	req.URL.RawQuery = queryStr.Encode()
	err = SignV2(req, Auth{"abc", "123"})
	c.Assert(err, IsNil)
	c.Assert(req.URL.Query().Get("Signature"), Equals, "gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M=")
	err = req.ParseForm()
	c.Assert(err, IsNil)
	c.Assert(req.Form["Signature"], DeepEquals, []string{"gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M="})
}

View File

@ -1,33 +0,0 @@
package s3
import (
"net/http"
"gopkg.in/amz.v3/aws"
)
// originalStrategy remembers the package default retry strategy so
// tests can restore it via SetAttemptStrategy(nil).
var originalStrategy = attempts

// BuildError exposes the unexported buildError to the test package.
func BuildError(resp *http.Response) error {
	return buildError(resp)
}

// SetAttemptStrategy overrides the package retry strategy; passing
// nil restores the default.
func SetAttemptStrategy(s *aws.AttemptStrategy) {
	if s == nil {
		attempts = originalStrategy
	} else {
		attempts = *s
	}
}

// AttemptStrategy returns the retry strategy currently in effect.
func AttemptStrategy() aws.AttemptStrategy {
	return attempts
}

// SetListPartsMax overrides the ListParts page size for tests.
func SetListPartsMax(n int) {
	listPartsMax = n
}

// SetListMultiMax overrides the ListMulti page size for tests.
func SetListMultiMax(n int) {
	listMultiMax = n
}

View File

@ -1,502 +0,0 @@
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"io"
"net/http"
"sort"
"strconv"
)
// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
//
// See http://goo.gl/vJfTG for an overview of multipart uploads.
type Multi struct {
	Bucket *Bucket
	Key string
	UploadId string
}

// That's the default. Here just for testing.
var listMultiMax = 1000

// listMultiResp mirrors the XML body of a ListMultipartUploads
// response; only the fields this package consumes are declared.
type listMultiResp struct {
	NextKeyMarker string
	NextUploadIdMarker string
	IsTruncated bool
	Upload []Multi
	CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
}
// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
	req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return nil, nil, err
	}
	query := req.URL.Query()
	query.Add("uploads", "")
	query.Add("max-uploads", strconv.FormatInt(int64(listMultiMax), 10))
	query.Add("prefix", prefix)
	query.Add("delimiter", delim)
	req.URL.RawQuery = query.Encode()
	for attempt := attempts.Start(); attempt.Next(); {
		// We need to resign every iteration because the pagination
		// markers appended below change the URL. (The original signed
		// once, before the loop, despite its own comment saying it
		// needed to resign — paginated requests went out mis-signed.)
		addAmazonDateHeader(req.Header)
		if err := b.S3.Sign(req, b.Auth); err != nil {
			return nil, nil, err
		}
		resp, err := requestRetryLoop(req, attempts)
		if err == nil {
			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
				err = buildError(resp)
			}
		}
		if err != nil {
			if shouldRetry(err) && attempt.HasNext() {
				continue
			}
			return nil, nil, err
		}
		var multiResp listMultiResp
		// Close the body even when decoding fails; the original leaked
		// it on the decode-error path.
		decodeErr := xml.NewDecoder(resp.Body).Decode(&multiResp)
		resp.Body.Close()
		if decodeErr != nil {
			return nil, nil, decodeErr
		}
		for i := range multiResp.Upload {
			multi := &multiResp.Upload[i]
			multi.Bucket = b
			multis = append(multis, multi)
		}
		prefixes = append(prefixes, multiResp.CommonPrefixes...)
		if !multiResp.IsTruncated {
			return multis, prefixes, nil
		}
		// Advance the pagination markers for the next page.
		query := req.URL.Query()
		query.Set("key-marker", multiResp.NextKeyMarker)
		query.Set("upload-id-marker", multiResp.NextUploadIdMarker)
		req.URL.RawQuery = query.Encode()
		// Last request worked; restart our counter.
		attempt = attempts.Start()
	}
	panic("unreachable")
}
// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
	existing, _, err := b.ListMulti(key, "")
	switch {
	case err == nil:
		// Listing succeeded; check for a matching upload below.
	case hasCode(err, "NoSuchUpload"):
		// No uploads at all — fall through and start a fresh one.
	default:
		return nil, err
	}
	for _, upload := range existing {
		if upload.Key == key {
			return upload, nil
		}
	}
	return b.InitMulti(key, contType, perm)
}
// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
	req, err := http.NewRequest("POST", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return nil, err
	}
	req.URL.Path += key
	query := req.URL.Query()
	query.Add("uploads", "")
	req.URL.RawQuery = query.Encode()
	req.Header.Add("Content-Type", contType)
	req.Header.Add("Content-Length", "0")
	req.Header.Add("x-amz-acl", string(perm))
	addAmazonDateHeader(req.Header)
	if err := b.S3.Sign(req, b.Auth); err != nil {
		return nil, err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err == nil {
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
			err = buildError(resp)
		}
	}
	if err != nil {
		return nil, err
	}
	// Close the response body; the original never did, leaking the
	// underlying connection.
	defer resp.Body.Close()
	var multiResp struct {
		UploadId string `xml:"UploadId"`
	}
	if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil {
		return nil, err
	}
	return &Multi{Bucket: b, Key: key, UploadId: multiResp.UploadId}, nil
}
// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
// See http://goo.gl/pqZer for details.
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
	// Measure the reader and compute its base64 MD5 up front; putPart
	// needs both for the Content-Length and Content-MD5 headers.
	size, _, md5b64, err := seekerInfo(r)
	if err != nil {
		return Part{}, err
	}
	return m.putPart(n, r, size, md5b64)
}
// putPart uploads part n of m from r, whose total size and base64 MD5
// the caller has already computed (see seekerInfo).
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
	_, err := r.Seek(0, 0)
	if err != nil {
		return Part{}, err
	}
	req, err := http.NewRequest("PUT", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), r)
	if err != nil {
		return Part{}, err
	}
	req.Close = true
	req.URL.Path += m.Key
	req.ContentLength = partSize
	query := req.URL.Query()
	query.Add("uploadId", m.UploadId)
	query.Add("partNumber", strconv.FormatInt(int64(n), 10))
	req.URL.RawQuery = query.Encode()
	req.Header.Add("Content-MD5", md5b64)
	addAmazonDateHeader(req.Header)
	if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
		return Part{}, err
	}
	// Signing may read the request body.
	if _, err := r.Seek(0, 0); err != nil {
		return Part{}, err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return Part{}, err
	}
	// Check the error before deferring the close: the original deferred
	// resp.Body.Close() first, dereferencing a nil response whenever the
	// request failed.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return Part{}, buildError(resp)
	}
	part := Part{n, resp.Header.Get("ETag"), partSize}
	if part.ETag == "" {
		return Part{}, errors.New("part upload succeeded with no ETag")
	}
	return part, nil
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
_, err = r.Seek(0, 0)
if err != nil {
return 0, "", "", err
}
digest := md5.New()
size, err = io.Copy(digest, r)
if err != nil {
return 0, "", "", err
}
sum := digest.Sum(nil)
md5hex = hex.EncodeToString(sum)
md5b64 = base64.StdEncoding.EncodeToString(sum)
return size, md5hex, md5b64, nil
}
type Part struct {
N int `xml:"PartNumber"`
ETag string
Size int64
}
type partSlice []Part
func (s partSlice) Len() int { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// listPartsResp mirrors the XML body of a ListParts response; only
// the fields this package consumes are declared.
type listPartsResp struct {
	NextPartNumberMarker string
	IsTruncated bool
	Part []Part
}

// That's the default. Here just for testing.
var listPartsMax = 1000
// ListParts returns the list of previously uploaded parts in m,
// ordered by part number.
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListParts() ([]Part, error) {
	req, err := http.NewRequest("GET", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
	if err != nil {
		return nil, err
	}
	req.Close = true
	req.URL.Path += m.Key
	query := req.URL.Query()
	query.Add("uploadId", m.UploadId)
	query.Add("max-parts", strconv.FormatInt(int64(listPartsMax), 10))
	req.URL.RawQuery = query.Encode()
	var parts partSlice
	for attempt := attempts.Start(); attempt.Next(); {
		addAmazonDateHeader(req.Header)
		// We need to resign every iteration because we're changing the URL.
		if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
			return nil, err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
			err = buildError(resp)
		}
		if err != nil {
			if shouldRetry(err) && attempt.HasNext() {
				continue
			}
			return nil, err
		}
		var listResp listPartsResp
		// Close the body whether or not decoding succeeds; the
		// original never closed it, leaking the connection on every
		// page.
		decodeErr := xml.NewDecoder(resp.Body).Decode(&listResp)
		resp.Body.Close()
		if decodeErr != nil {
			return nil, decodeErr
		}
		parts = append(parts, listResp.Part...)
		if !listResp.IsTruncated {
			break
		}
		// Advance the pagination marker for the next page.
		query.Set("part-number-marker", listResp.NextPartNumberMarker)
		req.URL.RawQuery = query.Encode()
		// Last request worked; restart our counter.
		attempt = attempts.Start()
	}
	sort.Sort(parts)
	return parts, nil
}
// ReaderAtSeeker combines random access (ReadAt) with sequential
// access (Read/Seek) over the same data; PutAll needs both to carve
// the input into sections and to checksum and upload them.
type ReaderAtSeeker interface {
	io.ReaderAt
	io.ReadSeeker
}
// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
	// A NoSuchUpload error just means there is nothing to resume.
	old, err := m.ListParts()
	if err != nil && !hasCode(err, "NoSuchUpload") {
		return nil, err
	}
	reuse := 0 // Index of next old part to consider reusing.
	current := 1 // Part number of latest good part handled.
	// Seek(0, 2) is seek-relative-to-end: it yields the total size.
	totalSize, err := r.Seek(0, 2)
	if err != nil {
		return nil, err
	}
	first := true // Must send at least one empty part if the file is empty.
	var result []Part
NextSection:
	for offset := int64(0); offset < totalSize || first; offset += partSize {
		first = false
		// The final section may be shorter than partSize.
		if offset+partSize > totalSize {
			partSize = totalSize - offset
		}
		section := io.NewSectionReader(r, offset, partSize)
		_, md5hex, md5b64, err := seekerInfo(section)
		if err != nil {
			return nil, err
		}
		// Scan old parts (sorted by N) for one matching this section's
		// number, size, and checksum; reuse it if everything matches.
		for reuse < len(old) && old[reuse].N <= current {
			// Looks like this part was already sent.
			part := &old[reuse]
			etag := `"` + md5hex + `"`
			if part.N == current && part.Size == partSize && part.ETag == etag {
				// Checksum matches. Reuse the old part.
				result = append(result, *part)
				current++
				continue NextSection
			}
			reuse++
		}
		// Part wasn't found or doesn't match. Send it.
		part, err := m.putPart(current, section, partSize, md5b64)
		if err != nil {
			return nil, err
		}
		result = append(result, part)
		current++
	}
	return result, nil
}
type completeUpload struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts completeParts `xml:"Part"`
}
type completePart struct {
PartNumber int
ETag string
}
type completeParts []completePart
func (p completeParts) Len() int { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
	var c completeUpload
	for _, p := range parts {
		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
	}
	// S3 requires the parts in ascending part-number order.
	sort.Sort(c.Parts)
	data, err := xml.Marshal(&c)
	if err != nil {
		return err
	}
	body := bytes.NewReader(data)
	req, err := http.NewRequest(
		"POST",
		m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name),
		body,
	)
	if err != nil {
		return err
	}
	req.Close = true
	req.ContentLength = int64(len(data))
	req.URL.Path += m.Key
	query := req.URL.Query()
	query.Add("uploadId", m.UploadId)
	req.URL.RawQuery = query.Encode()
	addAmazonDateHeader(req.Header)
	if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
		return err
	}
	// Signing may read the request body.
	if _, err := body.Seek(0, 0); err != nil {
		return err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return err
	}
	// Close the response body; the original never did, leaking the
	// underlying connection.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp)
	}
	return nil
}
// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
	req, err := http.NewRequest("DELETE", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
	if err != nil {
		// The original returned nil here, silently swallowing the
		// request-construction error.
		return err
	}
	req.URL.Path += m.Key
	query := req.URL.Query()
	query.Add("uploadId", m.UploadId)
	req.URL.RawQuery = query.Encode()
	addAmazonDateHeader(req.Header)
	if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
		return err
	}
	resp, err := requestRetryLoop(req, attempts)
	// Close the body (previously discarded and leaked) so the
	// connection can be reused.
	if resp != nil {
		resp.Body.Close()
	}
	return err
}

View File

@ -1,383 +0,0 @@
package s3_test
import (
"encoding/xml"
"io"
"io/ioutil"
"strings"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/s3"
)
// TestInitMulti verifies the POST ?uploads request and that the
// UploadId is parsed from the response body.
func (s *S) TestInitMulti(c *C) {
	testServer.Response(200, nil, InitMultiResultDump)
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "POST")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}

// TestMultiNoPreviousUpload checks that Multi falls back to InitMulti
// when listing finds no existing upload for the key.
func (s *S) TestMultiNoPreviousUpload(c *C) {
	// Don't retry the NoSuchUpload error.
	s3.RetryAttempts(false)
	testServer.Response(404, nil, NoSuchUploadErrorDump)
	testServer.Response(200, nil, InitMultiResultDump)
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.Multi("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "POST")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}

// TestMultiReturnOld checks that Multi reuses an existing upload when
// the listing contains a matching key.
func (s *S) TestMultiReturnOld(c *C) {
	testServer.Response(200, nil, ListMultiResultDump)
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.Multi("multi1", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	c.Assert(multi.Key, Equals, "multi1")
	c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
}

// TestListParts walks a two-page parts listing, including a transient
// 404 in the middle that should be retried.
func (s *S) TestListParts(c *C) {
	testServer.Response(200, nil, InitMultiResultDump)
	testServer.Response(200, nil, ListPartsResultDump1)
	testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
	testServer.Response(200, nil, ListPartsResultDump2)
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	parts, err := multi.ListParts()
	c.Assert(err, IsNil)
	c.Assert(parts, HasLen, 3)
	c.Assert(parts[0].N, Equals, 1)
	c.Assert(parts[0].Size, Equals, int64(5))
	c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
	c.Assert(parts[1].N, Equals, 2)
	c.Assert(parts[1].Size, Equals, int64(5))
	c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
	c.Assert(parts[2].N, Equals, 3)
	c.Assert(parts[2].Size, Equals, int64(5))
	c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
	testServer.WaitRequest()
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
	c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
	testServer.WaitRequest() // The internal error.
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
	c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
	c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
}

// TestPutPart checks the PUT ?partNumber request, including the
// Content-Length and Content-MD5 headers.
func (s *S) TestPutPart(c *C) {
	headers := map[string]string{
		"ETag": `"26f90efd10d614f100252ff56d88dad8"`,
	}
	testServer.Response(200, nil, InitMultiResultDump)
	testServer.Response(200, headers, "")
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
	c.Assert(err, IsNil)
	c.Assert(part.N, Equals, 1)
	c.Assert(part.Size, Equals, int64(8))
	c.Assert(part.ETag, Equals, headers["ETag"])
	testServer.WaitRequest()
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
	c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
	c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
}
func readAll(r io.Reader) string {
data, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return string(data)
}
func (s *S) TestPutAllNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
etag2 := map[string]string{"ETag": `"etag2"`}
etag3 := map[string]string{"ETag": `"etag3"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
testServer.Response(200, etag2, "")
testServer.Response(200, etag3, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Check(parts[0].ETag, Equals, `"etag1"`)
c.Check(parts[1].ETag, Equals, `"etag2"`)
c.Check(parts[2].ETag, Equals, `"etag3"`)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send part 1.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part1")
// Send part 2.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part2")
// Send part 3 with shorter body.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
c.Assert(readAll(req.Body), Equals, "last")
}
func (s *S) TestPutAllZeroSizeFile(c *C) {
// Don't retry the NoSuchUpload error.
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// Must send at least one part, so that completing it will work.
parts, err := multi.PutAll(strings.NewReader(""), 5)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send empty part.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
c.Assert(readAll(req.Body), Equals, "")
}
func (s *S) TestPutAllResume(c *C) {
etag2 := map[string]string{"ETag": `"etag2"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(200, nil, ListPartsResultDump2)
testServer.Response(200, etag2, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// "part1" and "part3" match the checksums in ResultDump1.
// The middle one is a mismatch (it refers to "part2").
parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts, broken in two requests.
for i := 0; i < 2; i++ {
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
}
// Send part 2, as it didn't match the checksum.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "partX")
}
// TestMultiComplete verifies that Multi.Complete sorts the supplied parts
// by part number and POSTs a CompleteMultipartUpload XML payload, retrying
// past a retryable InternalError response.
func (s *S) TestMultiComplete(c *C) {
	testServer.Response(200, nil, InitMultiResultDump)
	// Note the 200 response. Completing will hold the connection on some
	// kind of long poll, and may return a late error even after a 200.
	testServer.Response(200, nil, InternalErrorDump)
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	// Parts are deliberately passed out of order (2 before 1).
	err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
	c.Assert(err, IsNil)
	// Grab the 2nd request (the first was the InitMulti POST).
	req := testServer.WaitRequests(2)[1]
	c.Assert(req.Method, Equals, "POST")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
	var payload struct {
		XMLName xml.Name
		Part    []struct {
			PartNumber int
			ETag       string
		}
	}
	err = xml.NewDecoder(req.Body).Decode(&payload)
	c.Assert(err, IsNil)
	c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
	c.Assert(len(payload.Part), Equals, 2)
	// The payload must be sorted by part number regardless of input order.
	c.Assert(payload.Part[0].PartNumber, Equals, 1)
	c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
	c.Assert(payload.Part[1].PartNumber, Equals, 2)
	c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
}
// TestMultiAbort verifies that Multi.Abort issues a DELETE for the
// object path carrying the uploadId returned by InitMulti.
func (s *S) TestMultiAbort(c *C) {
	testServer.Response(200, nil, InitMultiResultDump)
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	err = multi.Abort()
	c.Assert(err, IsNil)
	// Skip the InitMulti request; inspect the abort request.
	testServer.WaitRequest()
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "DELETE")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
// TestListMulti verifies that Bucket.ListMulti issues a GET with the
// uploads/prefix/delimiter/max-uploads query and decodes both the Upload
// entries and the CommonPrefixes from ListMultiResultDump.
func (s *S) TestListMulti(c *C) {
	testServer.Response(200, nil, ListMultiResultDump)
	b, err := s.s3.Bucket("sample")
	c.Assert(err, IsNil)
	multis, prefixes, err := b.ListMulti("", "/")
	c.Assert(err, IsNil)
	c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
	c.Assert(multis, HasLen, 2)
	c.Assert(multis[0].Key, Equals, "multi1")
	c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
	c.Assert(multis[1].Key, Equals, "multi2")
	c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(req.Form["prefix"], DeepEquals, []string{""})
	c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
	c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
}

View File

@ -1,198 +0,0 @@
package s3_test
// GetObjectErrorDump is a canned NoSuchBucket error response used by the
// GET-object failure tests.
var GetObjectErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId></Error>
`
// GetListResultDump1 is a canned ListBucketResult with two keys and no
// truncation, used by the bucket-listing tests.
var GetListResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>quotes</Name>
<Prefix>N</Prefix>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>Nelson</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>5</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>Neo</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>4</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf1ffd86a5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
</ListBucketResult>
`
// GetListResultDump2 is a canned ListBucketResult exercising the
// delimiter/marker path: no keys, only CommonPrefixes.
var GetListResultDump2 = `
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix>photos/2006/</Prefix>
<Marker>some-marker</Marker>
<MaxKeys>1000</MaxKeys>
<Delimiter>/</Delimiter>
<IsTruncated>false</IsTruncated>
<CommonPrefixes>
<Prefix>photos/2006/feb/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>photos/2006/jan/</Prefix>
</CommonPrefixes>
</ListBucketResult>
`
// InitMultiResultDump is a canned InitiateMultipartUploadResult; the
// multipart tests match its UploadId against "JNbR_...QQ--".
var InitMultiResultDump = `
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
</InitiateMultipartUploadResult>
`
// ListPartsResultDump1 is the first (truncated) page of a ListParts
// response, containing parts 1 and 2.
var ListPartsResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>0</PartNumberMarker>
<NextPartNumberMarker>2</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>true</IsTruncated>
<Part>
<PartNumber>1</PartNumber>
<LastModified>2013-01-30T13:45:51.000Z</LastModified>
<ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
<Size>5</Size>
</Part>
<Part>
<PartNumber>2</PartNumber>
<LastModified>2013-01-30T13:45:52.000Z</LastModified>
<ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
// ListPartsResultDump2 is the second (final) page of the ListParts
// response started by ListPartsResultDump1, containing part 3.
var ListPartsResultDump2 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>2</PartNumberMarker>
<NextPartNumberMarker>3</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>false</IsTruncated>
<Part>
<PartNumber>3</PartNumber>
<LastModified>2013-01-30T13:46:50.000Z</LastModified>
<ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
// ListMultiResultDump is a canned ListMultipartUploadsResult with two
// in-progress uploads and two common prefixes, used by TestListMulti.
var ListMultiResultDump = `
<?xml version="1.0"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
<KeyMarker/>
<UploadIdMarker/>
<NextKeyMarker>multi1</NextKeyMarker>
<NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
<Delimiter>/</Delimiter>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
<Upload>
<Key>multi1</Key>
<UploadId>iUVug89pPvSswrikD</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<Upload>
<Key>multi2</Key>
<UploadId>DkirwsSvPp98guVUi</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<CommonPrefixes>
<Prefix>a/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>b/</Prefix>
</CommonPrefixes>
</ListMultipartUploadsResult>
`
// NoSuchUploadErrorDump is a canned NoSuchUpload error; this code is one
// of the retryable codes recognized by shouldRetry.
var NoSuchUploadErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchUpload</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
// InternalErrorDump is a canned InternalError response, used to exercise
// the retry path (shouldRetry treats InternalError as retryable).
var InternalErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InternalError</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`

View File

@ -1,566 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package s3
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"gopkg.in/amz.v3/aws"
)
// debug toggles the verbose request/response logging sprinkled through
// buildError and requestRetryLoop.
const debug = false
// The S3 type encapsulates operations with an S3 region.
// It embeds the credentials and region, and carries the signer used to
// authenticate every outgoing request.
type S3 struct {
	aws.Auth
	aws.Region
	Sign    aws.Signer
	private byte // Reserve the right of using private data.
}
// The Bucket type encapsulates operations with an S3 bucket.
// It embeds *S3 so bucket methods can reach the credentials and signer.
type Bucket struct {
	*S3
	Name string
}
// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
	ID          string
	DisplayName string
}
var (
	// attempts is the strategy used by the request retry loop; it is
	// swapped out wholesale by RetryAttempts.
	attempts        = defaultAttempts
	defaultAttempts = aws.AttemptStrategy{
		Min:   5,
		Total: 5 * time.Second,
		Delay: 200 * time.Millisecond,
	}
)
// RetryAttempts sets whether failing S3 requests may be retried to cope
// with eventual consistency or temporary failures. It should not be
// called while operations are in progress.
func RetryAttempts(retry bool) {
	if !retry {
		// A zero-valued strategy makes the retry loop run exactly once.
		attempts = aws.AttemptStrategy{}
		return
	}
	attempts = defaultAttempts
}
// New creates a new S3 client for the given credentials and region,
// signing requests with AWS signature version 4.
func New(auth aws.Auth, region aws.Region) *S3 {
	signer := aws.SignV4Factory(region.Name, "s3")
	return &S3{Auth: auth, Region: region, Sign: signer}
}
// Bucket returns a Bucket with the given name, rejecting names that
// contain characters S3 forbids in bucket identifiers.
func (s3 *S3) Bucket(name string) (*Bucket, error) {
	if strings.ContainsAny(name, "/:@") {
		return nil, fmt.Errorf("bad S3 bucket: %q", name)
	}
	// Some endpoints (and the lowercase-bucket regions) require
	// lowercase bucket names.
	if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
		name = strings.ToLower(name)
	}
	return &Bucket{S3: s3, Name: name}, nil
}
// createBucketConfiguration is the PUT-bucket body template; %s receives
// the region name (see locationConstraint).
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`
// locationConstraint returns a *strings.Reader specifying a
// LocationConstraint if required for the region, or a reader over the
// empty string otherwise.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() *strings.Reader {
	if !s3.Region.S3LocationConstraint {
		return strings.NewReader("")
	}
	body := fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
	return strings.NewReader(body)
}
// ACL is a canned S3 access-control policy name sent in the x-amz-acl header.
type ACL string

const (
	Private           = ACL("private")
	PublicRead        = ACL("public-read")
	PublicReadWrite   = ACL("public-read-write")
	AuthenticatedRead = ACL("authenticated-read")
	BucketOwnerRead   = ACL("bucket-owner-read")
	BucketOwnerFull   = ACL("bucket-owner-full-control")
)
// Put inserts an object into the S3 bucket by delegating to PutReader
// with an in-memory reader over data.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
	r := bytes.NewReader(data)
	n := int64(len(data))
	return b.PutReader(path, r, n, contType, perm)
}
// PutBucket creates a new bucket with the given access policy.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
	body := b.locationConstraint()
	req, err := http.NewRequest("PUT", b.ResolveS3BucketEndpoint(b.Name), body)
	if err != nil {
		return err
	}
	req.Close = true

	addAmazonDateHeader(req.Header)
	req.Header.Add("x-amz-acl", string(perm))

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	// Signing may read the request body; rewind before sending.
	if _, err := body.Seek(0, 0); err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Fix: the response used to be dropped entirely, which both leaked
	// the body (blocking connection reuse) and reported success even
	// when S3 returned an error status.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes body
	}
	resp.Body.Close()
	return nil
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
	req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return err
	}
	req.Close = true

	addAmazonDateHeader(req.Header)

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return err
	}
	// Fix: error statuses (e.g. BucketNotEmpty, NoSuchBucket) were
	// silently discarded; surface them as *Error so callers — such as
	// the test helper that inspects Code — can react.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes body
	}
	resp.Body.Close()
	return nil
}
// Get retrieves an object from an S3 bucket and returns its full
// contents in memory.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
	rc, err := b.GetReader(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
// GetReader retrieves an object from an S3 bucket. It is the caller's
// responsibility to call Close on rc when finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return nil, err
	}
	req.Close = true
	// The object path and date header must be in place before signing;
	// Sign covers both.
	req.URL.Path += path

	addAmazonDateHeader(req.Header)

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return nil, err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		// buildError consumes and closes the body.
		return nil, buildError(resp)
	}
	return resp.Body, nil
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF. Passing in an io.ReadSeeker for r will optimize
// the memory usage.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
	// Delegate to the header-aware variant with no extra headers.
	return b.PutReaderWithHeader(path, r, length, contType, perm, make(http.Header))
}
// PutReaderWithHeader inserts an object into the S3 bucket by
// consuming data from r until EOF. It also adds the headers provided
// to the request. Passing in an io.ReadSeeker for r will optimize the
// memory usage.
func (b *Bucket) PutReaderWithHeader(path string, r io.Reader, length int64, contType string, perm ACL, hdrs http.Header) error {
	// Convert the reader to a ReadSeeker so we can seek after
	// signing. Non-seekable readers are buffered fully in memory.
	seeker, ok := r.(io.ReadSeeker)
	if !ok {
		content, err := ioutil.ReadAll(r)
		if err != nil {
			return err
		}
		seeker = bytes.NewReader(content)
	}

	req, err := http.NewRequest("PUT", b.Region.ResolveS3BucketEndpoint(b.Name), seeker)
	if err != nil {
		return err
	}
	// The supplied headers replace the request's header map; content
	// type, ACL and date are then added on top.
	req.Header = hdrs
	req.Close = true
	req.URL.Path += path
	req.ContentLength = length

	req.Header.Add("Content-Type", contType)
	req.Header.Add("x-amz-acl", string(perm))
	addAmazonDateHeader(req.Header)

	// Determine the current offset; seeking is relative to it, not to
	// the start of the stream (seekFromPos == io.SeekCurrent).
	const seekFromPos = 1
	prevPos, err := seeker.Seek(0, seekFromPos)
	if err != nil {
		return err
	}

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	// Signing may read the request body; rewind to where we started.
	if _, err := seeker.Seek(prevPos, 0); err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes body
	}
	resp.Body.Close()
	return nil
}
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
	req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return err
	}
	req.Close = true
	req.URL.Path += path

	addAmazonDateHeader(req.Header)

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Fix: a non-2xx response (e.g. AccessDenied) used to be reported
	// as success; convert it to an *Error like the other operations do.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes body
	}
	resp.Body.Close()
	return nil
}
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
	Name       string
	Prefix     string
	Delimiter  string
	Marker     string
	NextMarker string
	MaxKeys    int
	// IsTruncated is true if the results have been truncated because
	// there are more keys and prefixes than can fit in MaxKeys.
	// N.B. this is the opposite sense to that documented (incorrectly) in
	// http://goo.gl/YjQTc
	IsTruncated    bool
	Contents       []Key
	CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
	Key          string
	LastModified string
	Size         int64
	// ETag gives the hex-encoded MD5 sum of the contents,
	// surrounded with double-quotes.
	ETag         string
	StorageClass string
	Owner        Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
//	index.html
//	index2.html
//	photos/2006/January/sample.jpg
//	photos/2006/February/sample2.jpg
//	photos/2006/February/sample3.jpg
//	photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
//	&ListResp{
//	    Name:      "sample-bucket",
//	    MaxKeys:   1000,
//	    Delimiter: "/",
//	    Contents:  []Key{
//	        {Key: "index.html", "index2.html"},
//	    },
//	    CommonPrefixes: []string{
//	        "photos/",
//	    },
//	}
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
//	&ListResp{
//	    Name:      "sample-bucket",
//	    MaxKeys:   1000,
//	    Delimiter: "/",
//	    Prefix:    "photos/2006/",
//	    CommonPrefixes: []string{
//	        "photos/2006/February/",
//	        "photos/2006/January/",
//	    },
//	}
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (*ListResp, error) {
	req, err := http.NewRequest("GET", b.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return nil, err
	}
	req.Close = true

	query := req.URL.Query()
	query.Add("prefix", prefix)
	query.Add("delimiter", delim)
	query.Add("marker", marker)
	if max != 0 {
		query.Add("max-keys", strconv.FormatInt(int64(max), 10))
	}
	req.URL.RawQuery = query.Encode()

	addAmazonDateHeader(req.Header)

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return nil, err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return nil, buildError(resp) // closes body
	}

	var result ListResp
	err = xml.NewDecoder(resp.Body).Decode(&result)
	resp.Body.Close()
	// Fix: the decode error used to be ignored, so a malformed response
	// was returned to the caller as an empty-but-successful listing.
	if err != nil {
		return nil, err
	}
	return &result, nil
}
// URL returns a non-signed URL that allows retriving the
// object at path. It only works if the object is publicly
// readable (see SignedURL).
func (b *Bucket) URL(path string) string {
	endpoint := b.ResolveS3BucketEndpoint(b.Name)
	return endpoint + path
}
// SignedURL returns a URL which can be used to fetch objects without
// signing for the given duration.
func (b *Bucket) SignedURL(path string, expires time.Duration) (string, error) {
	req, err := http.NewRequest("GET", b.URL(path), nil)
	if err != nil {
		return "", err
	}
	// The date header participates in the V4 presigning computation.
	req.Header.Add("date", time.Now().Format(aws.ISO8601BasicFormat))

	if err := aws.SignV4URL(req, b.Auth, b.Region.Name, "s3", expires); err != nil {
		return "", err
	}
	// SignV4URL mutates req.URL in place to embed the signature query.
	return req.URL.String(), nil
}
// request holds the pieces of a pending S3 HTTP request before it is
// assembled into a concrete URL.
type request struct {
	method   string
	bucket   string
	path     string
	signpath string
	params   url.Values
	headers  http.Header
	baseurl  string
	payload  io.Reader
	prepared bool
}

// url combines the request's base endpoint, object path and query
// parameters into a single *url.URL, reporting malformed endpoints.
func (req *request) url() (*url.URL, error) {
	base, err := url.Parse(req.baseurl)
	if err != nil {
		return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
	}
	base.Path = req.path
	base.RawQuery = req.params.Encode()
	return base, nil
}
// Error represents an error in an operation with S3.
type Error struct {
	StatusCode int    // HTTP status code (200, 403, ...)
	Code       string // EC2 error code ("UnsupportedOperation", ...)
	Message    string // The human-oriented error message
	BucketName string
	RequestId  string
	HostId     string
}

// Error returns the human-oriented message, satisfying the error interface.
func (e *Error) Error() string {
	return e.Message
}
// buildError consumes and closes r's body, decoding it as an S3 XML
// error document into an *Error. The decode is best-effort: if the body
// is not valid XML the Error is returned with only the HTTP status
// filled in (Message falls back to r.Status).
func buildError(r *http.Response) error {
	if debug {
		log.Printf("got error (status code %v)", r.StatusCode)
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Printf("\tread error: %v", err)
		} else {
			log.Printf("\tdata:\n%s\n\n", data)
		}
		// Replace the body so the decode below can still read it.
		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
	}

	err := Error{}
	// TODO return error if Unmarshal fails?
	xml.NewDecoder(r.Body).Decode(&err)
	r.Body.Close()
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	if debug {
		log.Printf("err: %#v\n", err)
	}
	return &err
}
// shouldRetry reports whether err represents a transient condition —
// an EOF, a DNS failure, a read/write network error, or one of the S3
// error codes known to be retryable.
func shouldRetry(err error) bool {
	if err == nil {
		return false
	}
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return true
	}
	switch e := err.(type) {
	case *net.DNSError:
		return true
	case *net.OpError:
		return e.Op == "read" || e.Op == "write"
	case *Error:
		return e.Code == "InternalError" ||
			e.Code == "NoSuchUpload" ||
			e.Code == "NoSuchBucket"
	}
	return false
}
// hasCode reports whether err is an *Error carrying the given S3 error code.
func hasCode(err error, code string) bool {
	if s3err, ok := err.(*Error); ok {
		return s3err.Code == code
	}
	return false
}
// requestRetryLoop attempts to send the request until the given
// strategy says to stop, retrying only transport-level failures that
// shouldRetry classifies as transient.
//
// NOTE: req is resent as-is; callers here pass requests with nil bodies,
// which is what makes the resend safe.
func requestRetryLoop(req *http.Request, retryStrat aws.AttemptStrategy) (*http.Response, error) {
	// Fix: the loop previously iterated the package-level `attempts`
	// variable, silently ignoring the retryStrat parameter.
	for attempt := retryStrat.Start(); attempt.Next(); {
		if debug {
			log.Printf("Full URL (in loop): %v", req.URL)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			if shouldRetry(err) && attempt.HasNext() {
				continue
			}
			return nil, fmt.Errorf("making request: %v", err)
		}
		if debug {
			log.Printf("Full response (in loop): %v", resp)
		}
		return resp, nil
	}
	return nil, fmt.Errorf("could not complete the request within the specified retry attempts")
}
// addAmazonDateHeader stamps the header with the current UTC time in the
// x-amz-date field, as required for request signing.
func addAmazonDateHeader(header http.Header) {
	now := time.Now().UTC()
	header.Set("x-amz-date", now.Format(aws.ISO8601BasicFormat))
}

View File

@ -1,321 +0,0 @@
package s3_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
// Test hooks gocheck into the standard go test runner.
func Test(t *testing.T) {
	TestingT(t)
}

// S is the local-server test suite; s3 points at the fake HTTP server.
type S struct {
	s3 *s3.S3
}

var _ = Suite(&S{})

var testServer = testutil.NewHTTPServer()

// SetUpSuite starts the fake server and builds a client aimed at it.
func (s *S) SetUpSuite(c *C) {
	testServer.Start()
	s.s3 = s3.New(
		aws.Auth{"abc", "123"},
		aws.Region{
			Name:       "faux-region-1",
			S3Endpoint: testServer.URL,
		},
	)
}

func (s *S) TearDownSuite(c *C) {
	s3.SetAttemptStrategy(nil)
	testServer.Stop()
}

// SetUpTest installs a short retry strategy so failing tests don't hang.
func (s *S) SetUpTest(c *C) {
	attempts := aws.AttemptStrategy{
		Total: 300 * time.Millisecond,
		Delay: 100 * time.Millisecond,
	}
	s3.SetAttemptStrategy(&attempts)
}

// TearDownTest drops any queued-but-unconsumed fake responses.
func (s *S) TearDownTest(c *C) {
	testServer.Flush()
}
// PutBucket docs: http://goo.gl/kBTCu

// TestPutBucket expects a PUT to /bucket/ with a date header set.
func (s *S) TestPutBucket(c *C) {
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	err = b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/")
	c.Assert(req.Header["Date"], Not(Equals), "")
}

// TestURL checks that Bucket.URL yields a plain, fetchable object URL.
func (s *S) TestURL(c *C) {
	testServer.Response(200, nil, "content")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	url := b.URL("name")
	r, err := http.Get(url)
	c.Assert(err, IsNil)
	data, err := ioutil.ReadAll(r.Body)
	r.Body.Close()
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
}

// DeleteBucket docs: http://goo.gl/GoBrY

// TestDelBucket expects a DELETE to /bucket/.
func (s *S) TestDelBucket(c *C) {
	testServer.Response(204, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	err = b.DelBucket()
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "DELETE")
	c.Assert(req.URL.Path, Equals, "/bucket/")
	c.Assert(req.Header["Date"], Not(Equals), "")
}

// GetObject docs: http://goo.gl/isCO7

// TestGet checks that Bucket.Get returns the full object body.
func (s *S) TestGet(c *C) {
	testServer.Response(200, nil, "content")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	data, err := b.Get("name")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")
}

// TestGetReader checks the streaming variant of Get.
func (s *S) TestGetReader(c *C) {
	testServer.Response(200, nil, "content")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	rc, err := b.GetReader("name")
	c.Assert(err, IsNil)
	data, err := ioutil.ReadAll(rc)
	rc.Close()
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")
}

// TestGetNotFound checks that a 404 error body is decoded into *s3.Error
// with all fields populated. Ten responses are queued because the retry
// loop may consume several.
func (s *S) TestGetNotFound(c *C) {
	for i := 0; i < 10; i++ {
		testServer.Response(404, nil, GetObjectErrorDump)
	}
	b, err := s.s3.Bucket("non-existent-bucket")
	c.Assert(err, IsNil)
	data, err := b.Get("non-existent")
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
	c.Assert(req.Header["Date"], Not(Equals), "")
	s3err, _ := err.(*s3.Error)
	c.Assert(s3err, NotNil)
	c.Assert(s3err.StatusCode, Equals, 404)
	c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
	c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
	c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
	c.Assert(s3err.Code, Equals, "NoSuchBucket")
	c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
	c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
	c.Assert(data, IsNil)
}
// PutObject docs: http://goo.gl/FEBPD

// TestPutObject expects a PUT with content type, length and ACL headers.
func (s *S) TestPutObject(c *C) {
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	err = b.Put("name", []byte("content"), "content-type", s3.Private)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// TestPutReader is the streaming counterpart of TestPutObject.
func (s *S) TestPutReader(c *C) {
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	buf := bytes.NewReader([]byte("content"))
	err = b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], Equals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// TestPutReaderWithHeader additionally checks that user-supplied headers
// (Cache-Control) survive alongside the standard ones.
func (s *S) TestPutReaderWithHeader(c *C) {
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	buf := bytes.NewReader([]byte("content"))
	err = b.PutReaderWithHeader("name", buf, int64(buf.Len()), "content-type", s3.Private, http.Header{
		"Cache-Control": []string{"max-age=5"},
	})
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
	c.Assert(req.Header["Cache-Control"], DeepEquals, []string{"max-age=5"})
}

// DelObject docs: http://goo.gl/APeTt

// TestDelObject expects a DELETE for the object path.
func (s *S) TestDelObject(c *C) {
	testServer.Response(200, nil, "")
	b, err := s.s3.Bucket("bucket")
	c.Assert(err, IsNil)
	err = b.Del("name")
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "DELETE")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")
}
// Bucket List Objects docs: http://goo.gl/YjQTc

// TestList decodes GetListResultDump1 and checks the query parameters
// and every field of both returned keys.
func (s *S) TestList(c *C) {
	testServer.Response(200, nil, GetListResultDump1)
	b, err := s.s3.Bucket("quotes")
	c.Assert(err, IsNil)
	data, err := b.List("N", "", "", 0)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/quotes/")
	c.Assert(req.Header["Date"], Not(Equals), "")
	c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
	c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
	c.Assert(req.Form["marker"], DeepEquals, []string{""})
	// max == 0 means max-keys must be omitted entirely.
	c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))
	c.Assert(data.Name, Equals, "quotes")
	c.Assert(data.Prefix, Equals, "N")
	c.Assert(data.IsTruncated, Equals, false)
	c.Assert(len(data.Contents), Equals, 2)
	c.Assert(data.Contents[0].Key, Equals, "Nelson")
	c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
	c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
	c.Assert(data.Contents[0].Size, Equals, int64(5))
	c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
	c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
	c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")
	c.Assert(data.Contents[1].Key, Equals, "Neo")
	c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
	c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
	c.Assert(data.Contents[1].Size, Equals, int64(4))
	c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
	c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
	c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
}

// TestListWithDelimiter exercises the delimiter/marker/max-keys path
// using GetListResultDump2 (no keys, only common prefixes).
func (s *S) TestListWithDelimiter(c *C) {
	testServer.Response(200, nil, GetListResultDump2)
	b, err := s.s3.Bucket("quotes")
	c.Assert(err, IsNil)
	data, err := b.List("photos/2006/", "/", "some-marker", 1000)
	c.Assert(err, IsNil)
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/quotes/")
	c.Assert(req.Header["Date"], Not(Equals), "")
	c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
	c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
	c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
	c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})
	c.Assert(data.Name, Equals, "example-bucket")
	c.Assert(data.Prefix, Equals, "photos/2006/")
	c.Assert(data.Delimiter, Equals, "/")
	c.Assert(data.Marker, Equals, "some-marker")
	c.Assert(data.IsTruncated, Equals, false)
	c.Assert(len(data.Contents), Equals, 0)
	c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}

// TestRetryAttempts checks that RetryAttempts toggles between the
// default strategy and a zero (no-retry) strategy.
func (s *S) TestRetryAttempts(c *C) {
	s3.SetAttemptStrategy(nil)
	orig := s3.AttemptStrategy()
	s3.RetryAttempts(false)
	c.Assert(s3.AttemptStrategy(), Equals, aws.AttemptStrategy{})
	s3.RetryAttempts(true)
	c.Assert(s3.AttemptStrategy(), Equals, orig)
}

View File

@ -1,610 +0,0 @@
package s3_test
import (
"bytes"
"crypto/md5"
"fmt"
"io/ioutil"
"net"
"net/http"
"sort"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
// AmazonServer represents an Amazon S3 server.
type AmazonServer struct {
	auth aws.Auth
}

// SetUp loads live credentials from the environment, failing the test
// run if they are absent.
func (s *AmazonServer) SetUp(c *C) {
	auth, err := aws.EnvAuth()
	if err != nil {
		c.Fatal(err.Error())
	}
	s.auth = auth
}
var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})

// AmazonClientSuite tests the client against a live S3 server.
type AmazonClientSuite struct {
	aws.Region
	srv AmazonServer
	ClientTests
}

func (s *AmazonClientSuite) SetUpSuite(c *C) {
	// Live tests only run when explicitly enabled.
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
	// In case tests were interrupted in the middle before.
	s.ClientTests.Cleanup()
}

func (s *AmazonClientSuite) TearDownTest(c *C) {
	s.ClientTests.Cleanup()
}

// AmazonDomainClientSuite tests the client against a live S3
// server using bucket names in the endpoint domain name rather
// than the request path.
type AmazonDomainClientSuite struct {
	aws.Region
	srv AmazonServer
	ClientTests
}

func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := s.Region
	region.S3BucketEndpoint += "https://s3.amazonaws.com/${bucket}/"
	s.s3 = s3.New(s.srv.auth, region)
	s.ClientTests.Cleanup()
}

func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
	s.ClientTests.Cleanup()
}
// ClientTests defines integration tests designed to test the client.
// It is not used as a test suite in itself, but embedded within
// another type.
type ClientTests struct {
	s3 *s3.S3
}

// Cleanup deletes the per-run test bucket and anything inside it.
func (s *ClientTests) Cleanup() {
	killBucket(testBucket(s.s3))
}

// testBucket derives a unique, disposable bucket name from the access
// key, region and current time.
func testBucket(s *s3.S3) *s3.Bucket {
	// Watch out! If this function is corrupted and made to match with something
	// people own, killBucket will happily remove *everything* inside the bucket.
	key := s.Auth.AccessKey
	if len(key) >= 8 {
		key = s.Auth.AccessKey[:8]
	}
	b, err := s.Bucket(strings.ToLower(fmt.Sprintf(
		"goamz-%s-%s-%s",
		s.Region.Name,
		key,
		// Add in the time element to help isolate tests from one
		// another.
		time.Now().Format("20060102T150405.999999999"),
	)))
	if err != nil {
		panic(err)
	}
	return b
}

// attempts bounds how long killBucket keeps retrying deletion.
var attempts = aws.AttemptStrategy{
	Min:   5,
	Total: 20 * time.Second,
	Delay: 100 * time.Millisecond,
}
// killBucket deletes b together with its entire contents, retrying for a
// bounded time (see attempts). DNS errors and already-deleted buckets are
// treated as success; on "BucketNotEmpty" it deletes up to 1000 objects
// and aborts all multipart uploads, then retries. If the bucket still
// cannot be deleted when the attempts are exhausted, it panics.
func killBucket(b *s3.Bucket) {
	var err error
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.DelBucket()
		if err == nil {
			return
		}
		// A DNS failure means the bucket endpoint never resolved;
		// treat the bucket as gone.
		if _, ok := err.(*net.DNSError); ok {
			return
		}
		e, ok := err.(*s3.Error)
		if ok && e.Code == "NoSuchBucket" {
			return
		}
		if ok && e.Code == "BucketNotEmpty" {
			// Errors are ignored here. Just retry.
			resp, err := b.List("", "", "", 1000)
			if err == nil {
				for _, key := range resp.Contents {
					_ = b.Del(key.Key)
				}
			}
			multis, _, _ := b.ListMulti("", "")
			for _, m := range multis {
				_ = m.Abort()
			}
		}
	}
	message := "cannot delete test bucket"
	if err != nil {
		message += ": " + err.Error()
	}
	panic(message)
}
// TestSignedUrl checks that a signed URL grants access to a private
// object while an unsigned request is refused.
func (s *ClientTests) TestSignedUrl(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.PublicRead)
	c.Assert(err, IsNil)
	s.testSignedUrl(c, b, "name")
	// Test that various special characters get escaped properly.
	s.testSignedUrl(c, b, "&@$=:,!-_.*'( )")
}

// testSignedUrl uploads a private object under name, verifies the plain
// URL is denied with AccessDenied, then fetches the object through a
// 24-hour signed URL and checks the body.
func (s *ClientTests) testSignedUrl(c *C, b *s3.Bucket, name string) {
	err := b.Put(name, []byte("test for signed URLs."), "text/plain", s3.Private)
	c.Assert(err, IsNil)
	defer b.Del(name)
	req, err := http.NewRequest("GET", b.URL(name), nil)
	c.Assert(err, IsNil)
	resp, err := http.DefaultClient.Do(req)
	c.Assert(err, IsNil)
	err = s3.BuildError(resp)
	c.Check(err, NotNil)
	c.Check(err.(*s3.Error).Code, Equals, "AccessDenied")
	url, err := b.SignedURL(name, 24*time.Hour)
	c.Assert(err, IsNil)
	req, err = http.NewRequest("GET", url, nil)
	c.Assert(err, IsNil)
	resp, err = http.DefaultClient.Do(req)
	c.Assert(err, IsNil)
	body, err := ioutil.ReadAll(resp.Body)
	c.Assert(err, IsNil)
	c.Check(string(body), Equals, "test for signed URLs.")
}
// TestBasicFunctionality exercises the object round-trip: Put/Get,
// PutReader/GetReader, Del, and finally DelBucket.
func (s *ClientTests) TestBasicFunctionality(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.PublicRead)
	c.Assert(err, IsNil)
	err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead)
	c.Assert(err, IsNil)
	defer b.Del("name")
	data, err := b.Get("name")
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "yo!")
	buf := bytes.NewReader([]byte("hey!"))
	err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private)
	c.Assert(err, IsNil)
	defer b.Del("name2")
	rc, err := b.GetReader("name2")
	c.Assert(err, IsNil)
	data, err = ioutil.ReadAll(rc)
	c.Check(err, IsNil)
	c.Check(string(data), Equals, "hey!")
	rc.Close()
	data, err = b.Get("name2")
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "hey!")
	err = b.Del("name")
	c.Assert(err, IsNil)
	err = b.Del("name2")
	c.Assert(err, IsNil)
	err = b.DelBucket()
	c.Assert(err, IsNil)
}
// TestGetNotFound verifies the error shape for a GET against a bucket
// name that should not exist (no timestamp suffix, so testBucket never
// created it).
func (s *ClientTests) TestGetNotFound(c *C) {
	b, err := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
	c.Assert(err, IsNil)
	data, err := b.Get("non-existent")
	s3err, _ := err.(*s3.Error)
	c.Assert(s3err, NotNil)
	c.Assert(s3err.StatusCode, Equals, 404)
	c.Assert(s3err.Code, Equals, "NoSuchBucket")
	c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
	c.Assert(data, IsNil)
}
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
	type result struct {
		aws.Region
		error
	}
	// One goroutine per region; the buffered channel collects results.
	results := make(chan result, len(aws.Regions))
	for _, region := range aws.Regions {
		go func(r aws.Region) {
			// Deliberately shadows s: this client targets region r.
			s := s3.New(s.s3.Auth, r)
			b, err := s.Bucket("goamz-" + s.Auth.AccessKey)
			// NOTE(review): an early return here skips the channel send,
			// which would stall the receive loop below — confirm that a
			// failed Check is acceptable to wedge the test.
			if !c.Check(err, IsNil) {
				return
			}
			_, err = b.Get("non-existent")
			if !c.Check(err, NotNil) {
				return
			}
			results <- result{r, err}
		}(region)
	}
	for _ = range aws.Regions {
		result := <-results
		if s3_err, ok := result.error.(*s3.Error); ok {
			if result.Region == aws.CNNorth && s3_err.Code == "InvalidAccessKeyId" {
				c.Log("You must utilize an account specifically for CNNorth.")
				continue
			}
			c.Check(s3_err.Code, Matches, "NoSuchBucket")
		} else if _, ok = result.error.(*net.DNSError); ok {
			// Okay as well.
		} else {
			c.Errorf("Non-S3 error: %s", result.error)
		}
	}
}
// objectNames is the fixed, sorted set of keys uploaded by the listing
// tests.
var objectNames = []string{
	"index.html",
	"index2.html",
	"photos/2006/February/sample2.jpg",
	"photos/2006/February/sample3.jpg",
	"photos/2006/February/sample4.jpg",
	"photos/2006/January/sample.jpg",
	"test/bar",
	"test/foo",
}

// keys builds s3.Key values carrying only the Key field, for use as
// expected listing contents.
func keys(names ...string) []s3.Key {
	ks := make([]s3.Key, len(names))
	for i, name := range names {
		ks[i].Key = name
	}
	return ks
}
// As the ListResp specifies all the parameters to the
// request too, we use it to specify request parameters
// and expected results. The Contents field is
// used only for the key names inside it.
var listTests = []s3.ListResp{
	// normal list.
	{
		Contents: keys(objectNames...),
	}, {
		Marker:   objectNames[0],
		Contents: keys(objectNames[1:]...),
	}, {
		Marker:   objectNames[0] + "a",
		Contents: keys(objectNames[1:]...),
	}, {
		Marker: "z",
	},
	// limited results.
	{
		MaxKeys:     2,
		Contents:    keys(objectNames[0:2]...),
		IsTruncated: true,
	}, {
		MaxKeys:     2,
		Marker:      objectNames[0],
		Contents:    keys(objectNames[1:3]...),
		IsTruncated: true,
	}, {
		MaxKeys:  2,
		Marker:   objectNames[len(objectNames)-2],
		Contents: keys(objectNames[len(objectNames)-1:]...),
	},
	// with delimiter
	{
		Delimiter:      "/",
		CommonPrefixes: []string{"photos/", "test/"},
		Contents:       keys("index.html", "index2.html"),
	}, {
		Delimiter:      "/",
		Prefix:         "photos/2006/",
		CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
	}, {
		Delimiter:      "/",
		Prefix:         "t",
		CommonPrefixes: []string{"test/"},
	}, {
		Delimiter:   "/",
		MaxKeys:     1,
		Contents:    keys("index.html"),
		IsTruncated: true,
	}, {
		Delimiter:      "/",
		MaxKeys:        1,
		Marker:         "index2.html",
		CommonPrefixes: []string{"photos/"},
		IsTruncated:    true,
	}, {
		Delimiter:      "/",
		MaxKeys:        1,
		Marker:         "photos/",
		CommonPrefixes: []string{"test/"},
		IsTruncated:    false,
	}, {
		Delimiter:      "Feb",
		CommonPrefixes: []string{"photos/2006/Feb"},
		Contents:       keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
	},
}
// TestDoublePutBucket checks that re-creating a bucket you already own
// either succeeds silently or fails with BucketAlreadyOwnedByYou
// (behaviour differs per region, see the s3test Config docs).
func (s *ClientTests) TestDoublePutBucket(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.PublicRead)
	c.Assert(err, IsNil)
	err = b.PutBucket(s3.PublicRead)
	if err != nil {
		c.Assert(err, FitsTypeOf, new(s3.Error))
		c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
	}
}
// TestBucketList uploads objectNames (object i holds i bytes of "a") and
// runs every case in listTests against the live List implementation.
func (s *ClientTests) TestBucketList(c *C) {
	b := testBucket(s.s3)
	defer b.DelBucket()
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	objData := make(map[string][]byte)
	for i, path := range objectNames {
		data := []byte(strings.Repeat("a", i))
		err := b.Put(path, data, "text/plain", s3.Private)
		c.Assert(err, IsNil)
		defer b.Del(path)
		objData[path] = data
	}
	for i, t := range listTests {
		c.Logf("test %d", i)
		resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
		c.Assert(err, IsNil)
		c.Check(resp.Name, Equals, b.Name)
		c.Check(resp.Delimiter, Equals, t.Delimiter)
		c.Check(resp.IsTruncated, Equals, t.IsTruncated)
		c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
		checkContents(c, resp.Contents, objData, t.Contents)
	}
}
// etag computes the quoted hex MD5 digest of data, matching the ETag
// format S3 reports for simple (non-multipart) objects.
func etag(data []byte) string {
	digest := md5.Sum(data)
	return fmt.Sprintf(`"%x"`, digest)
}
// checkContents verifies that contents matches expected key-for-key, and
// that each key's Size and ETag agree with the uploaded data.
func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
	c.Assert(contents, HasLen, len(expected))
	for i, k := range contents {
		c.Check(k.Key, Equals, expected[i].Key)
		// TODO mtime
		c.Check(k.Size, Equals, int64(len(data[k.Key])))
		c.Check(k.ETag, Equals, etag(data[k.Key]))
	}
}
// TestMultiInitPutList drives a multipart upload through init, five small
// part uploads and paginated listing, then checks that completing with
// undersized parts fails and that aborting invalidates the upload id.
func (s *ClientTests) TestMultiInitPutList(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	c.Assert(multi.UploadId, Matches, ".+")
	defer multi.Abort()
	var sent []s3.Part
	for i := 0; i < 5; i++ {
		p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
		c.Assert(err, IsNil)
		c.Assert(p.N, Equals, i+1)
		c.Assert(p.Size, Equals, int64(8))
		c.Assert(p.ETag, Matches, ".+")
		sent = append(sent, p)
	}
	// Force ListParts to paginate (two parts per request).
	s3.SetListPartsMax(2)
	parts, err := multi.ListParts()
	c.Assert(err, IsNil)
	c.Assert(parts, HasLen, len(sent))
	for i := range parts {
		c.Assert(parts[i].N, Equals, sent[i].N)
		c.Assert(parts[i].Size, Equals, sent[i].Size)
		c.Assert(parts[i].ETag, Equals, sent[i].ETag)
	}
	// All parts are under the multipart minimum size, so Complete must
	// be rejected.
	err = multi.Complete(parts)
	s3err, failed := err.(*s3.Error)
	c.Assert(failed, Equals, true)
	c.Assert(s3err.Code, Equals, "EntityTooSmall")
	err = multi.Abort()
	c.Assert(err, IsNil)
	_, err = multi.ListParts()
	s3err, ok := err.(*s3.Error)
	c.Assert(ok, Equals, true)
	c.Assert(s3err.Code, Equals, "NoSuchUpload")
}
// TestMultiComplete uploads a 5MB part plus a small trailing part,
// completes the upload with the part list deliberately reversed, and
// verifies the assembled object byte-for-byte.
// This may take a minute or more due to the minimum size accepted S3
// on multipart upload parts.
func (s *ClientTests) TestMultiComplete(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	c.Assert(multi.UploadId, Matches, ".+")
	defer multi.Abort()
	// Minimum size S3 accepts for all but the last part is 5MB.
	data1 := make([]byte, 5*1024*1024)
	data2 := []byte("<part 2>")
	part1, err := multi.PutPart(1, bytes.NewReader(data1))
	c.Assert(err, IsNil)
	part2, err := multi.PutPart(2, bytes.NewReader(data2))
	c.Assert(err, IsNil)
	// Purposefully reversed. The order requirement must be handled.
	err = multi.Complete([]s3.Part{part2, part1})
	c.Assert(err, IsNil)
	data, err := b.Get("multi")
	c.Assert(err, IsNil)
	c.Assert(len(data), Equals, len(data1)+len(data2))
	for i := range data1 {
		if data[i] != data1[i] {
			// BUG FIX: the format string has three verbs but the original
			// call passed only two arguments, omitting the byte index.
			c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
		}
	}
	c.Assert(string(data[len(data1):]), Equals, string(data2))
}
// multiList sorts in-progress multipart uploads by key so listing
// assertions are deterministic.
type multiList []*s3.Multi

func (l multiList) Len() int           { return len(l) }
func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
func (l multiList) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
// TestListMulti verifies listing of in-progress multipart uploads, with
// and without a prefix/delimiter, retrying while the server propagates
// state.
func (s *ClientTests) TestListMulti(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	// Ensure an empty state before testing its behavior.
	multis, _, err := b.ListMulti("", "")
	for _, m := range multis {
		err := m.Abort()
		c.Assert(err, IsNil)
	}
	keys := []string{
		"a/multi2",
		"a/multi3",
		"b/multi4",
		"multi1",
	}
	for _, key := range keys {
		m, err := b.InitMulti(key, "", s3.Private)
		c.Assert(err, IsNil)
		defer m.Abort()
	}
	// Amazon's implementation of the multiple-request listing for
	// multipart uploads in progress seems broken in multiple ways.
	// (next tokens are not provided, etc).
	//s3.SetListMultiMax(2)
	multis, prefixes, err := b.ListMulti("", "")
	c.Assert(err, IsNil)
	for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
		multis, prefixes, err = b.ListMulti("", "")
		c.Assert(err, IsNil)
	}
	sort.Sort(multiList(multis))
	c.Assert(prefixes, IsNil)
	var gotKeys []string
	for _, m := range multis {
		gotKeys = append(gotKeys, m.Key)
	}
	c.Assert(gotKeys, DeepEquals, keys)
	for _, m := range multis {
		c.Assert(m.Bucket, Equals, b)
		c.Assert(m.UploadId, Matches, ".+")
	}
	multis, prefixes, err = b.ListMulti("", "/")
	// NOTE(review): the retry below re-lists with an empty delimiter
	// rather than "/" as in the call above — this looks unintentional;
	// confirm before relying on the delimiter assertions under retry.
	for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
		multis, prefixes, err = b.ListMulti("", "")
		c.Assert(err, IsNil)
	}
	c.Assert(err, IsNil)
	c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
	c.Assert(multis, HasLen, 1)
	c.Assert(multis[0].Bucket, Equals, b)
	c.Assert(multis[0].Key, Equals, "multi1")
	c.Assert(multis[0].UploadId, Matches, ".+")
	for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
		multis, prefixes, err = b.ListMulti("", "")
		c.Assert(err, IsNil)
	}
	multis, prefixes, err = b.ListMulti("a/", "/")
	c.Assert(err, IsNil)
	c.Assert(prefixes, IsNil)
	c.Assert(multis, HasLen, 2)
	c.Assert(multis[0].Bucket, Equals, b)
	c.Assert(multis[0].Key, Equals, "a/multi2")
	c.Assert(multis[0].UploadId, Matches, ".+")
	c.Assert(multis[1].Bucket, Equals, b)
	c.Assert(multis[1].Key, Equals, "a/multi3")
	c.Assert(multis[1].UploadId, Matches, ".+")
}
// TestMultiPutAllZeroLength checks PutAll on an empty reader: a single
// zero-length part must still be produced and the upload must complete.
func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)
	defer multi.Abort()
	// This tests an edge case. Amazon requires at least one
	// part for multipart uploads to work, even if the part is empty.
	parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
	c.Assert(err, IsNil)
	c.Assert(parts, HasLen, 1)
	c.Assert(parts[0].Size, Equals, int64(0))
	// MD5 of the empty string.
	c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
	err = multi.Complete(parts)
	c.Assert(err, IsNil)
}

View File

@ -1,77 +0,0 @@
package s3_test
import (
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
)
// LocalServer wraps a fake in-memory s3test server together with the
// credentials and synthetic region used to reach it.
type LocalServer struct {
	auth   aws.Auth
	region aws.Region
	srv    *s3test.Server
	config *s3test.Config
}

// SetUp starts a fresh s3test server and points a synthetic region at
// its URL.
func (s *LocalServer) SetUp(c *C) {
	srv, err := s3test.NewServer(s.config)
	c.Assert(err, IsNil)
	c.Assert(srv, NotNil)
	s.srv = srv
	s.region = aws.Region{
		Name:                 "faux-region-1",
		S3Endpoint:           srv.URL(),
		S3LocationConstraint: true, // s3test server requires a LocationConstraint
	}
}
// LocalServerSuite defines tests that will run
// against the local s3test server. It includes
// selected tests from ClientTests;
// when the s3test functionality is sufficient, it should
// include all of them, and ClientTests can be simply embedded.
type LocalServerSuite struct {
	srv         LocalServer
	clientTests ClientTests
}

var (
	// run tests twice, once in us-east-1 mode, once not.
	_ = Suite(&LocalServerSuite{})
	_ = Suite(&LocalServerSuite{
		srv: LocalServer{
			config: &s3test.Config{
				Send409Conflict: true,
			},
		},
	})
)

// SetUpSuite starts the fake server and points the client tests at it.
func (s *LocalServerSuite) SetUpSuite(c *C) {
	s.srv.SetUp(c)
	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
	// In case a previous interrupted run left a bucket behind.
	s.clientTests.Cleanup()
}

// TearDownTest removes the test bucket after every test.
func (s *LocalServerSuite) TearDownTest(c *C) {
	s.clientTests.Cleanup()
}
// The wrappers below re-run selected ClientTests cases against the
// local fake server instead of live AWS.

func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
	s.clientTests.TestBasicFunctionality(c)
}

func (s *LocalServerSuite) TestGetNotFound(c *C) {
	s.clientTests.TestGetNotFound(c)
}

func (s *LocalServerSuite) TestBucketList(c *C) {
	s.clientTests.TestBucketList(c)
}

func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
	s.clientTests.TestDoublePutBucket(c)
}

View File

@ -1,629 +0,0 @@
package s3test
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"gopkg.in/amz.v3/s3"
)
// debug enables request logging in serveHTTP when set to true.
const debug = false

// s3Error is the XML error document returned to clients, plus the HTTP
// status code to send it with (statusCode itself is not serialized).
type s3Error struct {
	statusCode int
	XMLName    struct{} `xml:"Error"`
	Code       string
	Message    string
	BucketName string
	RequestId  string
	HostId     string
}

// action bundles the state needed to serve a single HTTP request.
type action struct {
	srv   *Server
	w     http.ResponseWriter
	req   *http.Request
	reqId string
}
// Config controls the internal behaviour of the Server. A nil config is the default
// and behaves as if all configurations assume their default behaviour. Once passed
// to NewServer, the configuration must not be modified.
type Config struct {
	// Send409Conflict controls how the Server will respond to calls to PUT on a
	// previously existing bucket. The default is false, and corresponds to the
	// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
	// all other regions.
	// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
	Send409Conflict bool
}

// send409Conflict reports the Send409Conflict setting, treating a nil
// receiver as the default (false).
func (c *Config) send409Conflict() bool {
	if c != nil {
		return c.Send409Conflict
	}
	return false
}
// Server is a fake S3 server for testing purposes.
// All of the data for the server is kept in memory.
type Server struct {
	url      string
	reqId    int
	listener net.Listener
	mu       sync.Mutex // guards buckets and reqId across concurrent requests.
	buckets  map[string]*bucket
	config   *Config
}

// bucket is one in-memory S3 bucket and its objects.
type bucket struct {
	name    string
	acl     s3.ACL
	ctime   time.Time
	objects map[string]*object
}

// object is one stored blob together with the metadata served with it.
type object struct {
	name     string
	mtime    time.Time
	meta     http.Header // metadata to return with requests.
	checksum []byte      // also held as Content-MD5 in meta.
	data     []byte
}

// A resource encapsulates the subject of an HTTP request.
// The resource referred to may or may not exist
// when the request is made.
type resource interface {
	put(a *action) interface{}
	get(a *action) interface{}
	post(a *action) interface{}
	delete(a *action) interface{}
}
// NewServer starts a fake S3 server listening on a random localhost
// port. A nil config selects the default (us-east-1-like) behaviour.
func NewServer(config *Config) (*Server, error) {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, fmt.Errorf("cannot listen on localhost: %v", err)
	}
	srv := &Server{
		listener: l,
		url:      "http://" + l.Addr().String(),
		buckets:  make(map[string]*bucket),
		config:   config,
	}
	// Serve in the background until the listener is closed by Quit.
	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		srv.serveHTTP(w, req)
	}))
	return srv, nil
}

// Quit closes down the server.
func (srv *Server) Quit() {
	srv.listener.Close()
}

// URL returns a URL for the server.
func (srv *Server) URL() string {
	return srv.url
}
// fatalf aborts request handling by panicking with an *s3Error carrying
// the given HTTP status, S3 error code and formatted message; serveHTTP
// recovers the panic and writes the XML error response.
func fatalf(code int, codeStr string, errf string, a ...interface{}) {
	panic(&s3Error{
		statusCode: code,
		Code:       codeStr,
		Message:    fmt.Sprintf(errf, a...),
	})
}
// serveHTTP serves the S3 protocol. Handlers signal errors by panicking
// with an *s3Error (via fatalf); the deferred recover below turns that
// into an XML error response.
func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
	// ignore error from ParseForm as it's usually spurious.
	req.ParseForm()
	// The whole request runs under the server lock; the in-memory state
	// has no other synchronization.
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if debug {
		log.Printf("s3test %q %q", req.Method, req.URL)
	}
	a := &action{
		srv:   srv,
		w:     w,
		req:   req,
		reqId: fmt.Sprintf("%09X", srv.reqId),
	}
	srv.reqId++
	var r resource
	defer func() {
		switch err := recover().(type) {
		case *s3Error:
			// Fill in which bucket the error relates to, if known.
			switch r := r.(type) {
			case objectResource:
				err.BucketName = r.bucket.name
			case bucketResource:
				err.BucketName = r.name
			}
			err.RequestId = a.reqId
			// TODO HostId
			w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
			w.WriteHeader(err.statusCode)
			xmlMarshal(w, err)
		case nil:
		default:
			// Not an s3Error: re-panic so real bugs surface.
			panic(err)
		}
	}()
	r = srv.resourceForURL(req.URL)
	var resp interface{}
	switch req.Method {
	case "PUT":
		resp = r.put(a)
	case "GET", "HEAD":
		resp = r.get(a)
	case "DELETE":
		resp = r.delete(a)
	case "POST":
		resp = r.post(a)
	default:
		fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
	}
	if resp != nil && req.Method != "HEAD" {
		xmlMarshal(w, resp)
	}
}
// xmlMarshal is the same as xml.Marshal except that
// it panics on error. The marshalling should not fail,
// but we want to know if it does.
func xmlMarshal(w io.Writer, x interface{}) {
if err := xml.NewEncoder(w).Encode(x); err != nil {
panic(fmt.Errorf("error marshalling %#v: %v", x, err))
}
}
// In a fully implemented test server, each of these would have
// its own resource type.
var unimplementedBucketResourceNames = map[string]bool{
	"acl":            true,
	"lifecycle":      true,
	"policy":         true,
	"location":       true,
	"logging":        true,
	"notification":   true,
	"versions":       true,
	"requestPayment": true,
	"versioning":     true,
	"website":        true,
	"uploads":        true,
}

// Object sub-resources the fake server does not implement.
var unimplementedObjectResourceNames = map[string]bool{
	"uploadId": true,
	"acl":      true,
	"torrent":  true,
	"uploads":  true,
}

// pathRegexp splits "/bucket/object" request paths: submatch 2 is the
// bucket name, submatch 4 the object key (either may be empty).
var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
// resourceForURL returns a resource object for the given URL.
// Unimplemented sub-resources return a nullResource, which rejects every
// method.
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
	m := pathRegexp.FindStringSubmatch(u.Path)
	if m == nil {
		fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
	}
	bucketName := m[2]
	objectName := m[4]
	if bucketName == "" {
		return nullResource{} // root
	}
	b := bucketResource{
		name:   bucketName,
		bucket: srv.buckets[bucketName],
	}
	q := u.Query()
	if objectName == "" {
		// Bucket-level request: punt on query parameters we don't support.
		for name := range q {
			if unimplementedBucketResourceNames[name] {
				return nullResource{}
			}
		}
		return b
	}
	if b.bucket == nil {
		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
	}
	objr := objectResource{
		name:    objectName,
		version: q.Get("versionId"),
		bucket:  b.bucket,
	}
	for name := range q {
		if unimplementedObjectResourceNames[name] {
			return nullResource{}
		}
	}
	if obj := objr.bucket.objects[objr.name]; obj != nil {
		objr.object = obj
	}
	return objr
}
// nullResource has error stubs for all resource methods.
type nullResource struct{}

// notAllowed reports the standard method-not-allowed S3 error via fatalf.
func notAllowed() interface{} {
	fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
	return nil
}

func (nullResource) put(a *action) interface{}    { return notAllowed() }
func (nullResource) get(a *action) interface{}    { return notAllowed() }
func (nullResource) post(a *action) interface{}   { return notAllowed() }
func (nullResource) delete(a *action) interface{} { return notAllowed() }
// timeFormat is the layout used for LastModified timestamps in listings.
const timeFormat = "2006-01-02T15:04:05.000Z07:00"

// bucketResource addresses a bucket, which may or may not exist yet.
type bucketResource struct {
	name   string
	bucket *bucket // non-nil if the bucket already exists.
}
// GET on a bucket lists the objects in the bucket, honouring the prefix,
// delimiter, marker and max-keys parameters.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
func (r bucketResource) get(a *action) interface{} {
	if r.bucket == nil {
		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
	}
	delimiter := a.req.Form.Get("delimiter")
	marker := a.req.Form.Get("marker")
	maxKeys := -1
	if s := a.req.Form.Get("max-keys"); s != "" {
		i, err := strconv.Atoi(s)
		if err != nil || i < 0 {
			// BUG FIX: fatalf takes (status, code, format, args...); the
			// original call omitted the error code, so the message text
			// landed in the Code field and the raw parameter in Message.
			fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s)
		}
		maxKeys = i
	}
	prefix := a.req.Form.Get("prefix")
	a.w.Header().Set("Content-Type", "application/xml")
	if a.req.Method == "HEAD" {
		return nil
	}
	var objs orderedObjects
	// first get all matching objects and arrange them in alphabetical order.
	for name, obj := range r.bucket.objects {
		if strings.HasPrefix(name, prefix) {
			objs = append(objs, obj)
		}
	}
	sort.Sort(objs)
	if maxKeys <= 0 {
		maxKeys = 1000 // default page size when max-keys is absent.
	}
	resp := &s3.ListResp{
		Name:      r.bucket.name,
		Prefix:    prefix,
		Delimiter: delimiter,
		Marker:    marker,
		MaxKeys:   maxKeys,
	}
	var prefixes []string
	for _, obj := range objs {
		if !strings.HasPrefix(obj.name, prefix) {
			continue
		}
		name := obj.name
		isPrefix := false
		if delimiter != "" {
			// Collapse keys sharing a delimiter-bounded prefix into one
			// CommonPrefixes entry.
			if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
				name = obj.name[:len(prefix)+i+len(delimiter)]
				if prefixes != nil && prefixes[len(prefixes)-1] == name {
					continue
				}
				isPrefix = true
			}
		}
		if name <= marker {
			continue
		}
		if len(resp.Contents)+len(prefixes) >= maxKeys {
			resp.IsTruncated = true
			break
		}
		if isPrefix {
			prefixes = append(prefixes, name)
		} else {
			// Contents contains only keys not found in CommonPrefixes
			resp.Contents = append(resp.Contents, obj.s3Key())
		}
	}
	resp.CommonPrefixes = prefixes
	return resp
}
// orderedObjects holds a slice of objects that can be sorted
// by name.
type orderedObjects []*object

func (s orderedObjects) Len() int {
	return len(s)
}
func (s orderedObjects) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s orderedObjects) Less(i, j int) bool {
	return s[i].name < s[j].name
}

// s3Key converts the stored object into the s3.Key form used in bucket
// listings.
func (obj *object) s3Key() s3.Key {
	return s3.Key{
		Key:          obj.name,
		LastModified: obj.mtime.Format(timeFormat),
		Size:         int64(len(obj.data)),
		ETag:         fmt.Sprintf(`"%x"`, obj.checksum),
		// TODO StorageClass
		// TODO Owner
	}
}
// DELETE on a bucket deletes the bucket if it's not empty.
func (r bucketResource) delete(a *action) interface{} {
	b := r.bucket
	if b == nil {
		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
	}
	if len(b.objects) > 0 {
		fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
	}
	delete(a.srv.buckets, b.name)
	return nil
}
// PUT on a bucket creates the bucket. Re-creating an existing bucket is
// a 409 conflict when the server's Config asks for non-us-east-1
// behaviour, a silent success otherwise.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
func (r bucketResource) put(a *action) interface{} {
	var created bool
	if r.bucket == nil {
		if !validBucketName(r.name) {
			fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
		}
		if loc := locationConstraint(a); loc == "" {
			// BUG FIX: the error code was misspelled "InvalidRequets".
			fatalf(400, "InvalidRequest", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
		}
		// TODO validate acl
		r.bucket = &bucket{
			name: r.name,
			// TODO default acl
			objects: make(map[string]*object),
		}
		a.srv.buckets[r.name] = r.bucket
		created = true
	}
	if !created && a.srv.config.send409Conflict() {
		fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
	}
	r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
	return nil
}
// post rejects POST on a bucket; this fake server does not implement it.
func (bucketResource) post(a *action) interface{} {
	// Consistency fix: report the same "MethodNotAllowed" code used by
	// every other unsupported-method path in this server, instead of the
	// bare "Method" code.
	fatalf(400, "MethodNotAllowed", "bucket POST method not available")
	return nil
}
// validBucketName returns whether name is a valid bucket name.
// Here are the rules, from:
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
//
// Can contain lowercase letters, numbers, periods (.), underscores (_),
// and dashes (-). You can use uppercase letters for buckets only in the
// US Standard region.
//
// Must start with a number or letter
//
// Must be between 3 and 255 characters long
//
// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)
// but the real S3 server does not seem to check that rule, so we will not
// check it either.
//
func validBucketName(name string) bool {
if len(name) < 3 || len(name) > 255 {
return false
}
r := name[0]
if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
return false
}
for _, r := range name {
switch {
case r >= '0' && r <= '9':
case r >= 'a' && r <= 'z':
case r == '_' || r == '-':
case r == '.':
default:
return false
}
}
return true
}
// responseParams lists the response-* query parameters a GET may use to
// override headers on the response.
var responseParams = map[string]bool{
	"content-type":        true,
	"content-language":    true,
	"expires":             true,
	"cache-control":       true,
	"content-disposition": true,
	"content-encoding":    true,
}

// objectResource addresses an object inside an existing bucket.
type objectResource struct {
	name    string
	version string
	bucket  *bucket // always non-nil.
	object  *object // may be nil.
}
// GET on an object gets the contents of the object.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
func (objr objectResource) get(a *action) interface{} {
	obj := objr.object
	if obj == nil {
		fatalf(404, "NoSuchKey", "The specified key does not exist.")
	}
	h := a.w.Header()
	// add metadata
	for name, d := range obj.meta {
		h[name] = d
	}
	// override header values in response to request parameters.
	for name, vals := range a.req.Form {
		if strings.HasPrefix(name, "response-") {
			name = name[len("response-"):]
			if !responseParams[name] {
				continue
			}
			h.Set(name, vals[0])
		}
	}
	if r := a.req.Header.Get("Range"); r != "" {
		fatalf(400, "NotImplemented", "range unimplemented")
	}
	// TODO Last-Modified-Since
	// TODO If-Modified-Since
	// TODO If-Unmodified-Since
	// TODO If-Match
	// TODO If-None-Match
	// TODO Connection: close ??
	// TODO x-amz-request-id
	h.Set("Content-Length", fmt.Sprint(len(obj.data)))
	h.Set("ETag", hex.EncodeToString(obj.checksum))
	h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
	// HEAD gets headers only, no body.
	if a.req.Method == "HEAD" {
		return nil
	}
	// TODO avoid holding the lock when writing data.
	_, err := a.w.Write(obj.data)
	if err != nil {
		// we can't do much except just log the fact.
		log.Printf("error writing data: %v", err)
	}
	return nil
}
// metaHeaders lists the request headers persisted as object metadata
// (in addition to any X-Amz-Meta-* headers).
var metaHeaders = map[string]bool{
	"Content-MD5":         true,
	"x-amz-acl":           true,
	"Content-Type":        true,
	"Content-Encoding":    true,
	"Content-Disposition": true,
}
// PUT on an object creates or replaces the object, verifying an optional
// Content-MD5 header (hex-encoded here) against the received body and
// recording whitelisted metadata headers.
func (objr objectResource) put(a *action) interface{} {
	// TODO Cache-Control header
	// TODO Expires header
	// TODO x-amz-server-side-encryption
	// TODO x-amz-storage-class
	// TODO is this correct, or should we erase all previous metadata?
	obj := objr.object
	if obj == nil {
		obj = &object{
			name: objr.name,
			meta: make(http.Header),
		}
	}
	var expectHash []byte
	if c := a.req.Header.Get("Content-MD5"); c != "" {
		var err error
		expectHash, err = hex.DecodeString(c)
		if err != nil || len(expectHash) != md5.Size {
			fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
		}
	}
	// Hash the body as it is read.
	sum := md5.New()
	// TODO avoid holding lock while reading data.
	data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
	if err != nil {
		fatalf(400, "TODO", "read error")
	}
	gotHash := sum.Sum(nil)
	// Idiom fix: bytes.Equal instead of bytes.Compare(...) != 0.
	if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
		fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
	}
	if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
		fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
	}
	// PUT request has been successful - save data and metadata
	for key, values := range a.req.Header {
		key = http.CanonicalHeaderKey(key)
		if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
			obj.meta[key] = values
		}
	}
	obj.data = data
	obj.checksum = gotHash
	obj.mtime = time.Now()
	objr.bucket.objects[objr.name] = obj
	return nil
}
// delete removes the object; deleting a missing key is a no-op.
func (objr objectResource) delete(a *action) interface{} {
	delete(objr.bucket.objects, objr.name)
	return nil
}

// post rejects POST on an object; this fake server does not implement it.
func (objr objectResource) post(a *action) interface{} {
	fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
	return nil
}

// CreateBucketConfiguration mirrors the XML body of a PUT-bucket request.
type CreateBucketConfiguration struct {
	LocationConstraint string
}
// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
// If there is no body, an empty string will be returned.
func locationConstraint(a *action) string {
	var body bytes.Buffer
	if _, err := io.Copy(&body, a.req.Body); err != nil {
		fatalf(400, "InvalidRequest", err.Error())
	}
	if body.Len() == 0 {
		return ""
	}
	var loc CreateBucketConfiguration
	if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
		fatalf(400, "InvalidRequest", err.Error())
	}
	return loc.LocationConstraint
}

View File

@ -3,13 +3,12 @@ package s3
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"github.com/minio/minio-go"
"github.com/restic/restic/backend"
)
@ -26,41 +25,47 @@ func s3path(t backend.Type, name string) string {
}
type S3Backend struct {
bucket *s3.Bucket
connChan chan struct{}
path string
s3api minio.API
connChan chan struct{}
bucketname string
}
// Open a backend using an S3 bucket object
func OpenS3Bucket(bucket *s3.Bucket, bucketname string) *S3Backend {
// Open opens the S3 backend at bucket and region.
func Open(regionname, bucketname string) (backend.Backend, error) {
config := minio.Config{
AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"),
SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
}
if !strings.Contains(regionname, ".") {
// Amazon region name
switch regionname {
case "us-east-1":
config.Endpoint = "https://s3.amazonaws.com"
default:
config.Endpoint = "https://s3-" + regionname + ".amazonaws.com"
}
} else {
// S3 compatible endpoint
config.Endpoint = "https://" + regionname
}
s3api, s3err := minio.New(config)
if s3err != nil {
return nil, s3err
}
connChan := make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ {
connChan <- struct{}{}
}
return &S3Backend{bucket: bucket, path: bucketname, connChan: connChan}
}
// Open opens the S3 backend at bucket and region.
func Open(regionname, bucketname string) (backend.Backend, error) {
auth, err := aws.EnvAuth()
if err != nil {
return nil, err
}
client := s3.New(auth, aws.Regions[regionname])
s3bucket, s3err := client.Bucket(bucketname)
if s3err != nil {
return nil, s3err
}
return OpenS3Bucket(s3bucket, bucketname), nil
return &S3Backend{s3api: s3api, bucketname: bucketname, connChan: connChan}, nil
}
// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
return be.path
return be.bucketname
}
type s3Blob struct {
@ -102,13 +107,13 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error {
path := s3path(t, name)
// Check key does not already exist
_, err := bb.b.bucket.GetReader(path)
_, err := bb.b.s3api.StatObject(bb.b.bucketname, path)
if err == nil {
return errors.New("key already exists!")
}
<-bb.b.connChan
err = bb.b.bucket.PutReader(path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream", "private")
err = bb.b.s3api.PutObject(bb.b.bucketname, path, "application/octet-stream", int64(bb.buf.Len()), bb.buf)
bb.b.connChan <- struct{}{}
bb.buf.Reset()
return err
@ -129,36 +134,25 @@ func (be *S3Backend) Create() (backend.Blob, error) {
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
path := s3path(t, name)
return be.bucket.GetReader(path)
r, _, err := be.s3api.GetObject(be.bucketname, path)
rc := ioutil.NopCloser(r)
return rc, err
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
//
// Diff residue removed: the entire pre-minio body (Get + io.CopyN to skip
// offset bytes + LimitReadCloser) was still present above the minio-go
// implementation, ending in an unconditional return that made the new code
// unreachable and shadowed by a redeclared `path`. The server-side range
// request via GetPartialObject replaces the client-side skip/limit logic.
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
	path := s3path(t, name)
	// NOTE(review): assumes GetPartialObject treats length == 0 as
	// "read to EOF", matching this method's documented contract — confirm
	// against the minio-go API.
	r, _, err := be.s3api.GetPartialObject(be.bucketname, path, int64(offset), int64(length))
	if err != nil {
		return nil, err
	}
	return ioutil.NopCloser(r), nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
found := false
path := s3path(t, name)
_, err := be.bucket.GetReader(path)
_, err := be.s3api.StatObject(be.bucketname, path)
if err == nil {
found = true
}
@ -170,7 +164,7 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
// Remove removes the blob with the given name and type.
//
// Diff residue removed: the stale pre-minio `be.bucket.Del(path)` return
// made the minio-go RemoveObject call below it unreachable; the minio
// call is kept, matching this commit's switch of S3 libraries.
func (be *S3Backend) Remove(t backend.Type, name string) error {
	path := s3path(t, name)
	return be.s3api.RemoveObject(be.bucketname, path)
}
// List returns a channel that yields all names of blobs of type t. A
@ -181,34 +175,12 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
prefix := s3path(t, "")
listresp, err := be.bucket.List(prefix, "/", "", maxKeysInList)
if err != nil {
close(ch)
return ch
}
matches := make([]string, len(listresp.Contents))
for idx, key := range listresp.Contents {
matches[idx] = strings.TrimPrefix(key.Key, prefix)
}
// Continue making requests to get full list.
for listresp.IsTruncated {
listresp, err = be.bucket.List(prefix, "/", listresp.NextMarker, maxKeysInList)
if err != nil {
close(ch)
return ch
}
for _, key := range listresp.Contents {
matches = append(matches, strings.TrimPrefix(key.Key, prefix))
}
}
listresp := be.s3api.ListObjects(be.bucketname, prefix, true)
go func() {
defer close(ch)
for _, m := range matches {
for obj := range listresp {
m := strings.TrimPrefix(obj.Stat.Key, prefix)
if m == "" {
continue
}

View File

@ -3,48 +3,12 @@ package backend_test
import (
"testing"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
bes3 "github.com/restic/restic/backend/s3"
. "github.com/restic/restic/test"
)
// LocalServer bundles the state for a fake local S3 server used by the
// backend tests.
//
// NOTE(review): aws.Auth, aws.Region, s3test.Server and s3test.Config all
// come from gopkg.in/amz.v3, which this commit removes from Godeps — this
// type looks like leftover pre-refactor code and will no longer compile
// once those imports are gone. Confirm it should be deleted.
type LocalServer struct {
auth aws.Auth
region aws.Region
srv *s3test.Server
config *s3test.Config
}

// s holds the shared local test-server state for the tests in this file.
var s LocalServer
// setupS3Backend prepares an S3 backend for the tests.
//
// NOTE(review): this function is mid-refactor diff residue. The body below
// spins up a gopkg.in/amz.v3 s3test server (a dependency removed by this
// commit) and returns from it, which makes the final bes3.Open return
// unreachable. Additionally, bes3.Open returns (backend.Backend, error),
// which does not match the declared single *bes3.S3Backend result —
// confirm the intended final signature and body before merging.
func setupS3Backend(t *testing.T) *bes3.S3Backend {
// Pre-refactor path: configure and start the fake s3test server.
s.config = &s3test.Config{
Send409Conflict: true,
}
srv, err := s3test.NewServer(s.config)
OK(t, err)
s.srv = srv
s.region = aws.Region{
Name: "faux-region-1",
S3Endpoint: srv.URL(),
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
s.auth = aws.Auth{"abc", "123"}
service := s3.New(s.auth, s.region)
bucket, berr := service.Bucket("testbucket")
OK(t, berr)
err = bucket.PutBucket("private")
OK(t, err)
t.Logf("created s3 backend locally")
return bes3.OpenS3Bucket(bucket, "testbucket")
// NOTE(review): unreachable — post-refactor return targeting the public
// minio play server; return arity also mismatches the signature above.
return bes3.Open("play.minio.io:9000", "restictestbucket")
}
func TestS3Backend(t *testing.T) {