From 6d1552af511aa4017a11eb3cd6983027da0202b0 Mon Sep 17 00:00:00 2001 From: Chris Howey Date: Fri, 6 Nov 2015 15:31:59 -0600 Subject: [PATCH 01/55] Switch s3 library to allow for s3 compatible backends. Fixes #315 --- Godeps/Godeps.json | 13 +- .../src/github.com/minio/minio-go/.gitignore | 2 + .../src/github.com/minio/minio-go/.travis.yml | 9 + .../github.com/minio/minio-go/CONTRIBUTING.md | 21 + .../src/github.com/minio/minio-go/LICENSE | 202 +++ .../src/github.com/minio/minio-go/README.md | 71 ++ .../src/github.com/minio/minio-go/api-core.go | 885 +++++++++++++ .../minio/minio-go/api-multipart-core.go | 329 +++++ .../src/github.com/minio/minio-go/api.go | 1114 +++++++++++++++++ .../minio/minio-go/api_handlers_test.go | 170 +++ .../minio/minio-go/api_private_test.go | 110 ++ .../minio/minio-go/api_public_test.go | 287 +++++ .../github.com/minio/minio-go/appveyor.yml | 41 + .../github.com/minio/minio-go/bucket-acl.go | 75 ++ .../src/github.com/minio/minio-go/chopper.go | 136 ++ .../src/github.com/minio/minio-go/common.go | 115 ++ .../github.com/minio/minio-go/definitions.go | 181 +++ .../src/github.com/minio/minio-go/errors.go | 168 +++ .../minio-go/examples/play/bucketexists.go | 40 + .../minio-go/examples/play/getbucketacl.go | 41 + .../minio/minio-go/examples/play/getobject.go | 51 + .../examples/play/getpartialobject.go | 51 + .../minio-go/examples/play/listbuckets.go | 41 + .../examples/play/listincompleteuploads.go | 44 + .../minio-go/examples/play/listobjects.go | 41 + .../minio-go/examples/play/makebucket.go | 40 + .../minio/minio-go/examples/play/putobject.go | 52 + .../minio-go/examples/play/removebucket.go | 43 + .../examples/play/removeincompleteupload.go | 41 + .../minio-go/examples/play/removeobject.go | 42 + .../minio-go/examples/play/setbucketacl.go | 40 + .../minio-go/examples/play/statobject.go | 40 + .../minio-go/examples/s3/bucketexists.go | 42 + .../minio-go/examples/s3/getbucketacl.go | 43 + .../minio/minio-go/examples/s3/getobject.go | 53 + .../minio-go/examples/s3/getpartialobject.go | 53 + .../minio/minio-go/examples/s3/listbuckets.go | 43 + .../examples/s3/listincompleteuploads.go | 44 + .../minio/minio-go/examples/s3/listobjects.go | 43 + .../minio/minio-go/examples/s3/makebucket.go | 42 + .../examples/s3/presignedgetobject.go | 43 + .../examples/s3/presignedpostpolicy.go | 54 + .../examples/s3/presignedputobject.go | 43 + .../minio/minio-go/examples/s3/putobject.go | 54 + .../minio-go/examples/s3/removebucket.go | 43 + .../examples/s3/removeincompleteupload.go | 43 + .../minio-go/examples/s3/removeobject.go | 42 + .../minio-go/examples/s3/setbucketacl.go | 42 + .../minio/minio-go/examples/s3/statobject.go | 42 + .../github.com/minio/minio-go/post-policy.go | 152 +++ .../src/github.com/minio/minio-go/request.go | 498 ++++++++ .../src/gopkg.in/amz.v3/aws/attempt.go | 74 -- .../src/gopkg.in/amz.v3/aws/attempt_test.go | 59 - .../_workspace/src/gopkg.in/amz.v3/aws/aws.go | 268 ---- .../src/gopkg.in/amz.v3/aws/aws_test.go | 84 -- .../src/gopkg.in/amz.v3/aws/sign.go | 447 ------- .../src/gopkg.in/amz.v3/aws/sign_test.go | 285 ----- .../src/gopkg.in/amz.v3/s3/export_test.go | 33 - .../src/gopkg.in/amz.v3/s3/multi.go | 502 -------- .../src/gopkg.in/amz.v3/s3/multi_test.go | 383 ------ .../src/gopkg.in/amz.v3/s3/responses_test.go | 198 --- .../_workspace/src/gopkg.in/amz.v3/s3/s3.go | 566 --------- .../src/gopkg.in/amz.v3/s3/s3_test.go | 321 ----- .../src/gopkg.in/amz.v3/s3/s3i_test.go | 610 --------- .../src/gopkg.in/amz.v3/s3/s3t_test.go | 77 -- 
.../src/gopkg.in/amz.v3/s3/s3test/server.go | 629 ---------- backend/s3/s3.go | 120 +- backend/s3_test.go | 38 +- 68 files changed, 5994 insertions(+), 4655 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/.gitignore create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/LICENSE create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/README.md create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-core.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/chopper.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/common.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/definitions.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/errors.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go create mode 100644 
Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/export_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/responses_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3i_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3t_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3test/server.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d3ee6153c..01bcb29b5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -22,6 +22,11 @@ "ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" }, + { + "ImportPath": "github.com/minio/minio-go", + "Comment": "v0.2.5-58-g5c3a491", + "Rev": "5c3a4919116141f088990bd6ee385877648c7a25" + }, { "ImportPath": "github.com/pkg/sftp", "Rev": "518aed2757a65cfa64d4b1b2baf08410f8b7a6bc" @@ -49,14 +54,6 @@ { "ImportPath": "golang.org/x/net/context", "Rev": "7654728e381988afd88e58cabfd6363a5ea91810" - }, - { - "ImportPath": "gopkg.in/amz.v3/aws", - "Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676" - }, - { - "ImportPath": "gopkg.in/amz.v3/s3", - "Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676" } ] } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/.gitignore b/Godeps/_workspace/src/github.com/minio/minio-go/.gitignore new file mode 100644 index 000000000..acf19db3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/.gitignore @@ -0,0 +1,2 @@ +*~ +*.test \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml new file mode 
100644 index 000000000..01078a5e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: +- 1.5.1 +script: +- go vet ./... +- go test -race -v ./... +notifications: + slack: + secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8= diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/minio/minio-go/CONTRIBUTING.md new file mode 100644 index 000000000..b4b224eef --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/CONTRIBUTING.md @@ -0,0 +1,21 @@ + +### Developer Guidelines + +``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: + +* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request + +* When you're ready to create a pull request, be sure to: + - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. + +* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/LICENSE b/Godeps/_workspace/src/github.com/minio/minio-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
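The README that follows shows the new client in action. Because minio-go signs requests against whatever `Endpoint` it is given, the same code reaches AWS or any other S3-compatible server, which is the point of this switch (#315). A minimal sketch mirroring the README example, with placeholder endpoint and credentials:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Unlike the amz.v3 library this patch removes, minio-go takes an
	// arbitrary Endpoint, so a self-hosted S3-compatible server works
	// the same as AWS. URL and keys below are placeholders.
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://my-s3-compatible-server.example.com:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	for bucket := range s3Client.ListBuckets() {
		if bucket.Err != nil {
			log.Fatalln(bucket.Err)
		}
		log.Println(bucket.Stat)
	}
}
```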
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/README.md b/Godeps/_workspace/src/github.com/minio/minio-go/README.md new file mode 100644 index 000000000..bda9123a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/README.md @@ -0,0 +1,71 @@ +# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +## Install + +```sh +$ go get github.com/minio/minio-go +``` +## Example + +```go +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for bucket := range s3Client.ListBuckets() { + if bucket.Err != nil { + log.Fatalln(bucket.Err) + } + log.Println(bucket.Stat) + } +} +``` + +## Documentation + +### Bucket Level +* [MakeBucket(bucket, acl) error](examples/s3/makebucket.go) +* [BucketExists(bucket) error](examples/s3/bucketexists.go) +* [RemoveBucket(bucket) error](examples/s3/removebucket.go) +* [GetBucketACL(bucket) (BucketACL, error)](examples/s3/getbucketacl.go) +* [SetBucketACL(bucket, BucketACL) error)](examples/s3/setbucketacl.go) +* [ListBuckets() <-chan BucketStat](examples/s3/listbuckets.go) +* [ListObjects(bucket, prefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go) +* [ListIncompleteUploads(bucket, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go) + +### Object Level +* [PutObject(bucket, object, size, io.Reader) error](examples/s3/putobject.go) +* [GetObject(bucket, object) (io.Reader, ObjectStat, error)](examples/s3/getobject.go) +* [GetPartialObject(bucket, object, offset, length) (io.Reader, ObjectStat, error)](examples/s3/getpartialobject.go) +* [StatObject(bucket, object) (ObjectStat, error)](examples/s3/statobject.go) +* [RemoveObject(bucket, object) error](examples/s3/removeobject.go) +* [RemoveIncompleteUpload(bucket, object) <-chan error](examples/s3/removeincompleteupload.go) + +### Presigned Bucket/Object Level +* [PresignedGetObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedgetobject.go) +* [PresignedPutObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedputobject.go) +* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go) + +### API Reference + +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go) + +## Contribute + +[Contributors Guide](./CONTRIBUTING.md) + +[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go new file mode 100644 index 000000000..fd9c23a45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go @@ -0,0 +1,885 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +const ( + separator = "/" +) + +// apiCore container to hold unexported internal functions +type apiCore struct { + config *Config +} + +// closeResp close non nil response with any response Body +func closeResp(resp *http.Response) { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } +} + +// putBucketRequest wrapper creates a new putBucket request +func (a apiCore) putBucketRequest(bucket, acl, location string) (*request, error) { + var r *request + var err error + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "PUT", + HTTPPath: separator + bucket, + } + var createBucketConfigBuffer *bytes.Reader + // If location is set use it and create proper bucket configuration + switch { + case location != "": + createBucketConfig := new(createBucketConfiguration) + createBucketConfig.Location = location + var createBucketConfigBytes []byte + switch { + case a.config.AcceptType == "application/xml": + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + case a.config.AcceptType == "application/json": + createBucketConfigBytes, err = json.Marshal(createBucketConfig) + default: + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + } + if err != nil { + return nil, err + } + createBucketConfigBuffer = bytes.NewReader(createBucketConfigBytes) + } + switch { + case createBucketConfigBuffer == nil: + r, err = newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + default: + r, err = newRequest(op, a.config, createBucketConfigBuffer) + if err != nil { + return nil, err + } + r.req.ContentLength = int64(createBucketConfigBuffer.Len()) + } + // by default bucket is private + switch { + case acl != "": + r.Set("x-amz-acl", acl) + default: + r.Set("x-amz-acl", "private") + } + + return r, nil +} + +/// Bucket Write Operations + +// putBucket create a new bucket +// +// Requires valid AWS Access Key ID to authenticate requests +// Anonymous requests are never allowed to create buckets +// +// optional arguments are acl and location - by default all buckets are created +// with ``private`` acl and location set to US Standard if one wishes to set +// different ACLs and Location one can set them properly. 
+// +// ACL valid values +// ------------------ +// private - owner gets full access [DEFAULT] +// public-read - owner gets full access, others get read access +// public-read-write - owner gets full access, others get full access too +// authenticated-read - owner gets full access, authenticated users get read access +// ------------------ +// +// Location valid values +// ------------------ +// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ] +// +// Default - US standard +func (a apiCore) putBucket(bucket, acl, location string) error { + req, err := a.putBucketRequest(bucket, acl, location) + if err != nil { + return err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + return nil +} + +// putBucketRequestACL wrapper creates a new putBucketACL request +func (a apiCore) putBucketACLRequest(bucket, acl string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "PUT", + HTTPPath: separator + bucket + "?acl", + } + req, err := newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + req.Set("x-amz-acl", acl) + return req, nil +} + +// putBucketACL set the permissions on an existing bucket using Canned ACL's +func (a apiCore) putBucketACL(bucket, acl string) error { + req, err := a.putBucketACLRequest(bucket, acl) + if err != nil { + return err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + return nil +} + +// getBucketACLRequest wrapper creates a new getBucketACL request +func (a apiCore) getBucketACLRequest(bucket string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + "?acl", + } + req, err := newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + return req, nil +} + +// getBucketACL get the acl information on an existing bucket +func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) { + req, err := a.getBucketACLRequest(bucket) + if err != nil { + return accessControlPolicy{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return accessControlPolicy{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return accessControlPolicy{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + policy := accessControlPolicy{} + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &policy) + if err != nil { + return accessControlPolicy{}, err + } + if policy.AccessControlList.Grant == nil { + errorResponse := ErrorResponse{ + Code: "InternalError", + Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + return accessControlPolicy{}, errorResponse + } + return policy, nil +} + +// getBucketLocationRequest wrapper creates a new getBucketLocation request +func (a apiCore) getBucketLocationRequest(bucket string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + "?location", + } + req, err := newRequest(op, a.config, nil) + if 
err != nil { + return nil, err + } + return req, nil +} + +// getBucketLocation uses location subresource to return a bucket's region +func (a apiCore) getBucketLocation(bucket string) (string, error) { + req, err := a.getBucketLocationRequest(bucket) + if err != nil { + return "", err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return "", err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + var locationConstraint string + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &locationConstraint) + if err != nil { + return "", err + } + return locationConstraint, nil +} + +// listObjectsRequest wrapper creates a new listObjects request +func (a apiCore) listObjectsRequest(bucket, marker, prefix, delimiter string, maxkeys int) (*request, error) { + // resourceQuery - get resources properly escaped and lined up before using them in http request + resourceQuery := func() (*string, error) { + switch { + case marker != "": + marker = fmt.Sprintf("&marker=%s", getURLEncodedPath(marker)) + fallthrough + case prefix != "": + prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix)) + fallthrough + case delimiter != "": + delimiter = fmt.Sprintf("&delimiter=%s", delimiter) + } + query := fmt.Sprintf("?max-keys=%d", maxkeys) + marker + prefix + delimiter + return &query, nil + } + query, err := resourceQuery() + if err != nil { + return nil, err + } + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + *query, + } + r, err := newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + return r, nil +} + +/// Bucket Read Operations + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request paramters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (a apiCore) listObjects(bucket, marker, prefix, delimiter string, maxkeys int) (listBucketResult, error) { + if err := invalidBucketError(bucket); err != nil { + return listBucketResult{}, err + } + req, err := a.listObjectsRequest(bucket, marker, prefix, delimiter, maxkeys) + if err != nil { + return listBucketResult{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return listBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listBucketResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + listBucketResult := listBucketResult{} + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listBucketResult) + if err != nil { + return listBucketResult, err + } + // close body while returning, along with any error + return listBucketResult, nil +} + +// headBucketRequest wrapper creates a new headBucket request +func (a apiCore) headBucketRequest(bucket string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "HEAD", + HTTPPath: separator + bucket, + } + return newRequest(op, a.config, nil) +} + +// headBucket useful to determine if a bucket exists and you have permission to access it. 
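For orientation, the public wrappers over these bucket calls are the ones listed in the README (`MakeBucket(bucket, acl) error`, `BucketExists(bucket) error`). A sketch under those signatures, assuming the README's client setup; bucket name and ACL are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// MakeBucket wraps putBucket above; the canned ACL strings are the
	// ones documented on putBucket ("private" is the default).
	if err := s3Client.MakeBucket("mybucket", "private"); err != nil {
		log.Fatalln(err)
	}
	// BucketExists wraps headBucket: a nil error means the bucket exists
	// and is accessible (404 -> NoSuchBucket, 403 -> AccessDenied).
	if err := s3Client.BucketExists("mybucket"); err != nil {
		log.Fatalln(err)
	}
}
```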
+func (a apiCore) headBucket(bucket string) error { + if err := invalidBucketError(bucket); err != nil { + return err + } + req, err := a.headBucketRequest(bucket) + if err != nil { + return err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + // Head has no response body, handle it + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + errorResponse = ErrorResponse{ + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + case http.StatusForbidden: + errorResponse = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + default: + errorResponse = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + return errorResponse + } + } + return nil +} + +// deleteBucketRequest wrapper creates a new deleteBucket request +func (a apiCore) deleteBucketRequest(bucket string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "DELETE", + HTTPPath: separator + bucket, + } + return newRequest(op, a.config, nil) +} + +// deleteBucket deletes the bucket named in the URI +// +// NOTE: - +// All objects (including all object versions and delete markers) +// in the bucket must be deleted before successfully attempting this request +func (a apiCore) deleteBucket(bucket string) error { + if err := invalidBucketError(bucket); err != nil { + return err + } + req, err := a.deleteBucketRequest(bucket) + if err != nil { + return err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + errorResponse = ErrorResponse{ + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + case http.StatusForbidden: + errorResponse = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + case http.StatusConflict: + errorResponse = ErrorResponse{ + Code: "Conflict", + Message: "Bucket not empty", + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + default: + errorResponse = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + Resource: separator + bucket, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + return errorResponse + } + } + return nil +} + +/// Object Read/Write/Stat Operations + +func (a apiCore) putObjectUnAuthenticatedRequest(bucket, object, contentType string, size int64, body io.Reader) (*request, error) { + if strings.TrimSpace(contentType) == "" { + contentType = "application/octet-stream" + } + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "PUT", + HTTPPath: separator + bucket + separator + object, + } + r, 
err := newUnauthenticatedRequest(op, a.config, body) + if err != nil { + return nil, err + } + // Content-MD5 is not set consciously + r.Set("Content-Type", contentType) + r.req.ContentLength = size + return r, nil +} + +// putObjectUnAuthenticated - add an object to a bucket +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (a apiCore) putObjectUnAuthenticated(bucket, object, contentType string, size int64, body io.Reader) (ObjectStat, error) { + req, err := a.putObjectUnAuthenticatedRequest(bucket, object, contentType, size, body) + if err != nil { + return ObjectStat{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + var metadata ObjectStat + metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + return metadata, nil +} + +// putObjectRequest wrapper creates a new PutObject request +func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (*request, error) { + if strings.TrimSpace(contentType) == "" { + contentType = "application/octet-stream" + } + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "PUT", + HTTPPath: separator + bucket + separator + object, + } + r, err := newRequest(op, a.config, body) + if err != nil { + return nil, err + } + // set Content-MD5 as base64 encoded md5 + r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) + r.Set("Content-Type", contentType) + r.req.ContentLength = size + return r, nil +} + +// putObject - add an object to a bucket +// NOTE: You must have WRITE permissions on a bucket to add an object to it. 
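The public upload entry point sits on top of this call; its signature, as listed in the README's API section, is `PutObject(bucket, object, size, io.Reader) error`. A sketch under that signature; the file and bucket names are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	file, err := os.Open("backup.tar")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	fi, err := file.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	// PutObject drives the putObject call above; Content-MD5 and
	// Content-Type headers are filled in by the library.
	if err := s3Client.PutObject("mybucket", "backup.tar", fi.Size(), file); err != nil {
		log.Fatalln(err)
	}
}
```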
+func (a apiCore) putObject(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (ObjectStat, error) { + req, err := a.putObjectRequest(bucket, object, contentType, md5SumBytes, size, body) + if err != nil { + return ObjectStat{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + var metadata ObjectStat + metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + return metadata, nil +} + +func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string { + t := time.Now().UTC() + r := new(request) + r.config = a.config + credential := getCredential(r.config.AccessKeyID, r.config.Region, t) + p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)}) + p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader}) + p.addNewPolicy(policy{"eq", "$x-amz-credential", credential}) + + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = authHeader + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + p.formData["x-amz-signature"] = r.PostPresignSignature(policyBase64, t) + return p.formData +} + +func (a apiCore) presignedPutObject(bucket, object string, expires int64) (string, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "PUT", + HTTPPath: separator + bucket + separator + object, + } + r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10)) + if err != nil { + return "", err + } + return r.PreSignV4() +} + +func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offset, length int64) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + separator + object, + } + r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10)) + if err != nil { + return nil, err + } + switch { + case length > 0 && offset > 0: + r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + case offset > 0 && length == 0: + r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + case length > 0 && offset == 0: + r.Set("Range", fmt.Sprintf("bytes=-%d", length)) + } + return r, nil +} + +func (a apiCore) presignedGetObject(bucket, object string, expires, offset, length int64) (string, error) { + if err := invalidArgumentError(object); err != nil { + return "", err + } + req, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length) + if err != nil { + return "", err + } + return req.PreSignV4() +} + +// getObjectRequest wrapper creates a new getObject request +func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + separator + object, + } + r, err := newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + switch { + case length > 0 && offset > 0: + r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + case offset > 0 && length == 0: + r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + case length > 0 && offset == 0: + r.Set("Range", fmt.Sprintf("bytes=-%d", length)) + } + return r, nil +} + +// getObject - retrieve object from Object Storage +// +// Additionally this function also takes 
range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) { + if err := invalidArgumentError(object); err != nil { + return nil, ObjectStat{}, err + } + req, err := a.getObjectRequest(bucket, object, offset, length) + if err != nil { + return nil, ObjectStat{}, err + } + resp, err := req.Do() + if err != nil { + return nil, ObjectStat{}, err + } + if resp != nil { + switch resp.StatusCode { + case http.StatusOK: + case http.StatusPartialContent: + default: + return nil, ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + return nil, ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues", + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + var objectstat ObjectStat + objectstat.ETag = md5sum + objectstat.Key = object + objectstat.Size = resp.ContentLength + objectstat.LastModified = date + objectstat.ContentType = contentType + + // do not close body here, caller will close + return resp.Body, objectstat, nil +} + +// deleteObjectRequest wrapper creates a new deleteObject request +func (a apiCore) deleteObjectRequest(bucket, object string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "DELETE", + HTTPPath: separator + bucket + separator + object, + } + return newRequest(op, a.config, nil) +} + +// deleteObject deletes a given object from a bucket +func (a apiCore) deleteObject(bucket, object string) error { + if err := invalidBucketError(bucket); err != nil { + return err + } + if err := invalidArgumentError(object); err != nil { + return err + } + req, err := a.deleteObjectRequest(bucket, object) + if err != nil { + return err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + errorResponse = ErrorResponse{ + Code: "NoSuchKey", + Message: "The specified key does not exist.", + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + case http.StatusForbidden: + errorResponse = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied", + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + default: + errorResponse = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + return errorResponse + } + } + return nil +} + +// headObjectRequest wrapper creates a new headObject request +func (a apiCore) 
headObjectRequest(bucket, object string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "HEAD", + HTTPPath: separator + bucket + separator + object, + } + return newRequest(op, a.config, nil) +} + +// headObject retrieves metadata from an object without returning the object itself +func (a apiCore) headObject(bucket, object string) (ObjectStat, error) { + if err := invalidBucketError(bucket); err != nil { + return ObjectStat{}, err + } + if err := invalidArgumentError(object); err != nil { + return ObjectStat{}, err + } + req, err := a.headObjectRequest(bucket, object) + if err != nil { + return ObjectStat{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + errorResponse = ErrorResponse{ + Code: "NoSuchKey", + Message: "The specified key does not exist.", + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + case http.StatusForbidden: + errorResponse = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied", + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + default: + errorResponse = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + Resource: separator + bucket + separator + object, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + + } + return ObjectStat{}, errorResponse + } + } + md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + return ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues", + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + return ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues", + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + var objectstat ObjectStat + objectstat.ETag = md5sum + objectstat.Key = object + objectstat.Size = size + objectstat.LastModified = date + objectstat.ContentType = contentType + return objectstat, nil +} + +/// Service Operations + +// listBucketRequest wrapper creates a new listBuckets request +func (a apiCore) listBucketsRequest() (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator, + } + return newRequest(op, a.config, nil) +} + +// listBuckets list of all buckets owned by the authenticated sender of the request +func (a apiCore) listBuckets() (listAllMyBucketsResult, error) { + req, err := a.listBucketsRequest() + if err != nil { + return listAllMyBucketsResult{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return listAllMyBucketsResult{}, err + } + 
if resp != nil { + // for un-authenticated requests, amazon sends a redirect handle it + if resp.StatusCode == http.StatusTemporaryRedirect { + return listAllMyBucketsResult{}, ErrorResponse{ + Code: "AccessDenied", + Message: "Anonymous access is forbidden for this operation", + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + } + } + if resp.StatusCode != http.StatusOK { + return listAllMyBucketsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listAllMyBucketsResult) + if err != nil { + return listAllMyBucketsResult, err + } + return listAllMyBucketsResult, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go new file mode 100644 index 000000000..05e1f74c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go @@ -0,0 +1,329 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" +) + +// listMultipartUploadsRequest wrapper creates a new listMultipartUploads request +func (a apiCore) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (*request, error) { + // resourceQuery - get resources properly escaped and lined up before using them in http request + resourceQuery := func() (string, error) { + switch { + case keymarker != "": + keymarker = fmt.Sprintf("&key-marker=%s", getURLEncodedPath(keymarker)) + fallthrough + case uploadIDMarker != "": + uploadIDMarker = fmt.Sprintf("&upload-id-marker=%s", uploadIDMarker) + fallthrough + case prefix != "": + prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix)) + fallthrough + case delimiter != "": + delimiter = fmt.Sprintf("&delimiter=%s", delimiter) + } + query := fmt.Sprintf("?uploads&max-uploads=%d", maxuploads) + keymarker + uploadIDMarker + prefix + delimiter + return query, nil + } + query, err := resourceQuery() + if err != nil { + return nil, err + } + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "GET", + HTTPPath: separator + bucket + query, + } + r, err := newRequest(op, a.config, nil) + if err != nil { + return nil, err + } + return r, nil +} + +// listMultipartUploads - (List Multipart Uploads) - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. 
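The README exposes this multipart machinery as `ListIncompleteUploads` and `RemoveIncompleteUpload(bucket, object) <-chan error`. A sketch of cleaning up an aborted upload, assuming the README's client setup; names are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	config := minio.Config{
		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
		SecretAccessKey: "YOUR-PASSWORD-HERE",
		Endpoint:        "https://s3.amazonaws.com",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// RemoveIncompleteUpload drives listMultipartUploads and
	// abortMultipartUpload under the hood; errors stream back over
	// the channel (README: "RemoveIncompleteUpload(bucket, object) <-chan error").
	for err := range s3Client.RemoveIncompleteUpload("mybucket", "backup.tar") {
		if err != nil {
			log.Fatalln(err)
		}
	}
}
```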
+// request paramters :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (a apiCore) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (listMultipartUploadsResult, error) { + req, err := a.listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter, maxuploads) + if err != nil { + return listMultipartUploadsResult{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return listMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listMultipartUploadsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + listMultipartUploadsResult := listMultipartUploadsResult{} + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + // close body while returning, along with any error + return listMultipartUploadsResult, nil +} + +// initiateMultipartRequest wrapper creates a new initiateMultiPart request +func (a apiCore) initiateMultipartRequest(bucket, object string) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "POST", + HTTPPath: separator + bucket + separator + object + "?uploads", + } + return newRequest(op, a.config, nil) +} + +// initiateMultipartUpload initiates a multipart upload and returns an upload ID +func (a apiCore) initiateMultipartUpload(bucket, object string) (initiateMultipartUploadResult, error) { + req, err := a.initiateMultipartRequest(bucket, object) + if err != nil { + return initiateMultipartUploadResult{}, err + } + resp, err := req.Do() + defer closeResp(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) + } + } + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request +func (a apiCore) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*request, error) { + op := &operation{ + HTTPServer: a.config.Endpoint, + HTTPMethod: "POST", + HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID, + } + var completeMultipartUploadBytes []byte + var err error + switch { + case a.config.AcceptType == "application/xml": + completeMultipartUploadBytes, err = xml.Marshal(complete) + case a.config.AcceptType == "application/json": + completeMultipartUploadBytes, err = json.Marshal(complete) + default: + completeMultipartUploadBytes, err = xml.Marshal(complete) + } + if err != nil { + return nil, err + } + completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) + r, err := newRequest(op, a.config, completeMultipartUploadBuffer) + if err != nil { 
+// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request
+func (a apiCore) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*request, error) {
+	op := &operation{
+		HTTPServer: a.config.Endpoint,
+		HTTPMethod: "POST",
+		HTTPPath:   separator + bucket + separator + object + "?uploadId=" + uploadID,
+	}
+	var completeMultipartUploadBytes []byte
+	var err error
+	switch {
+	case a.config.AcceptType == "application/xml":
+		completeMultipartUploadBytes, err = xml.Marshal(complete)
+	case a.config.AcceptType == "application/json":
+		completeMultipartUploadBytes, err = json.Marshal(complete)
+	default:
+		completeMultipartUploadBytes, err = xml.Marshal(complete)
+	}
+	if err != nil {
+		return nil, err
+	}
+	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+	r, err := newRequest(op, a.config, completeMultipartUploadBuffer)
+	if err != nil {
+		return nil, err
+	}
+	r.req.ContentLength = int64(completeMultipartUploadBuffer.Len())
+	return r, nil
+}
+
+// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
+func (a apiCore) completeMultipartUpload(bucket, object, uploadID string, c completeMultipartUpload) (completeMultipartUploadResult, error) {
+	req, err := a.completeMultipartUploadRequest(bucket, object, uploadID, c)
+	if err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+	resp, err := req.Do()
+	defer closeResp(resp)
+	if err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return completeMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
+		}
+	}
+	completeMultipartUploadResult := completeMultipartUploadResult{}
+	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &completeMultipartUploadResult)
+	if err != nil {
+		return completeMultipartUploadResult, err
+	}
+	return completeMultipartUploadResult, nil
+}
+
+// abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request
+func (a apiCore) abortMultipartUploadRequest(bucket, object, uploadID string) (*request, error) {
+	op := &operation{
+		HTTPServer: a.config.Endpoint,
+		HTTPMethod: "DELETE",
+		HTTPPath:   separator + bucket + separator + object + "?uploadId=" + uploadID,
+	}
+	return newRequest(op, a.config, nil)
+}
+
+// abortMultipartUpload aborts a multipart upload for the given uploadID; all uploaded parts are deleted
+func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error {
+	req, err := a.abortMultipartUploadRequest(bucket, object, uploadID)
+	if err != nil {
+		return err
+	}
+	resp, err := req.Do()
+	defer closeResp(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			// Abort has no response body; construct an error from the status code
+			var errorResponse ErrorResponse
+			switch resp.StatusCode {
+			case http.StatusNotFound:
+				errorResponse = ErrorResponse{
+					Code:      "NoSuchUpload",
+					Message:   "The specified multipart upload does not exist.",
+					Resource:  separator + bucket + separator + object,
+					RequestID: resp.Header.Get("x-amz-request-id"),
+				}
+			case http.StatusForbidden:
+				errorResponse = ErrorResponse{
+					Code:      "AccessDenied",
+					Message:   "Access Denied",
+					Resource:  separator + bucket + separator + object,
+					RequestID: resp.Header.Get("x-amz-request-id"),
+				}
+			default:
+				errorResponse = ErrorResponse{
+					Code:      resp.Status,
+					Message:   "",
+					Resource:  separator + bucket + separator + object,
+					RequestID: resp.Header.Get("x-amz-request-id"),
+				}
+			}
+			return errorResponse
+		}
+	}
+	return nil
+}
+
+// listObjectPartsRequest wrapper creates a new ListObjectParts request
+func (a apiCore) listObjectPartsRequest(bucket, object, uploadID string, partNumberMarker, maxParts int) (*request, error) {
+	// resourceQuery - get resources properly escaped and lined up before using them in http request
+	resourceQuery := func() string {
+		var partNumberMarkerStr string
+		switch {
+		case partNumberMarker != 0:
+			partNumberMarkerStr = fmt.Sprintf("&part-number-marker=%d", partNumberMarker)
+		}
+		return fmt.Sprintf("?uploadId=%s&max-parts=%d", uploadID, maxParts) + partNumberMarkerStr
+	}
+	op := &operation{
+		HTTPServer: a.config.Endpoint,
+		HTTPMethod: "GET",
+		HTTPPath:   separator + bucket + separator + object + resourceQuery(),
+	}
+	return newRequest(op, a.config, nil)
+}
+
+// listObjectParts (List Parts) - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should begin.
+func (a apiCore) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+	req, err := a.listObjectPartsRequest(bucket, object, uploadID, partNumberMarker, maxParts)
+	if err != nil {
+		return listObjectPartsResult{}, err
+	}
+	resp, err := req.Do()
+	defer closeResp(resp)
+	if err != nil {
+		return listObjectPartsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return listObjectPartsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
+		}
+	}
+	listObjectPartsResult := listObjectPartsResult{}
+	err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listObjectPartsResult)
+	if err != nil {
+		return listObjectPartsResult, err
+	}
+	return listObjectPartsResult, nil
+}
+
+// uploadPartRequest wrapper creates a new UploadPart request
+func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (*request, error) {
+	op := &operation{
+		HTTPServer: a.config.Endpoint,
+		HTTPMethod: "PUT",
+		HTTPPath:   separator + bucket + separator + object + "?partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + uploadID,
+	}
+	r, err := newRequest(op, a.config, body)
+	if err != nil {
+		return nil, err
+	}
+	// set Content-MD5 as base64 encoded md5
+	r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
+	r.req.ContentLength = size
+	return r, nil
+}
+
+// uploadPart uploads a part in a multipart upload.
+func (a apiCore) uploadPart(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (completePart, error) {
+	req, err := a.uploadPartRequest(bucket, object, uploadID, md5SumBytes, partNumber, size, body)
+	if err != nil {
+		return completePart{}, err
+	}
+	cPart := completePart{}
+	cPart.PartNumber = partNumber
+	cPart.ETag = "\"" + hex.EncodeToString(md5SumBytes) + "\""
+
+	// initiate the request
+	resp, err := req.Do()
+	defer closeResp(resp)
+	if err != nil {
+		return completePart{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return completePart{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
+		}
+	}
+	return cPart, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
new file mode 100644
index 000000000..f74f7c574
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
@@ -0,0 +1,1114 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "encoding/hex" + "errors" + "io" + "net/http" + "net/url" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// API - Cloud Storage API interface +type API interface { + // Bucket Read/Write/Stat operations + BucketAPI + + // Object Read/Write/Stat operations + ObjectAPI + + // Presigned API + PresignedAPI +} + +// BucketAPI - bucket specific Read/Write/Stat interface +type BucketAPI interface { + MakeBucket(bucket string, cannedACL BucketACL) error + BucketExists(bucket string) error + RemoveBucket(bucket string) error + SetBucketACL(bucket string, cannedACL BucketACL) error + GetBucketACL(bucket string) (BucketACL, error) + + ListBuckets() <-chan BucketStatCh + ListObjects(bucket, prefix string, recursive bool) <-chan ObjectStatCh + ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh +} + +// ObjectAPI - object specific Read/Write/Stat interface +type ObjectAPI interface { + GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error) + GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) + PutObject(bucket, object, contentType string, size int64, data io.Reader) error + StatObject(bucket, object string) (ObjectStat, error) + RemoveObject(bucket, object string) error + + RemoveIncompleteUpload(bucket, object string) <-chan error +} + +// PresignedAPI - object specific for now +type PresignedAPI interface { + PresignedGetObject(bucket, object string, expires time.Duration) (string, error) + PresignedPutObject(bucket, object string, expires time.Duration) (string, error) + PresignedPostPolicy(*PostPolicy) (map[string]string, error) +} + +// BucketStatCh - bucket metadata over read channel +type BucketStatCh struct { + Stat BucketStat + Err error +} + +// ObjectStatCh - object metadata over read channel +type ObjectStatCh struct { + Stat ObjectStat + Err error +} + +// ObjectMultipartStatCh - multipart object metadata over read channel +type ObjectMultipartStatCh struct { + Stat ObjectMultipartStat + Err error +} + +// BucketStat container for bucket metadata +type BucketStat struct { + // The name of the bucket. + Name string + // Date the bucket was created. + CreationDate time.Time +} + +// ObjectStat container for object metadata +type ObjectStat struct { + ETag string + Key string + LastModified time.Time + Size int64 + ContentType string + + Owner struct { + DisplayName string + ID string + } + + // The class of storage used to store the object. + StorageClass string +} + +// ObjectMultipartStat container for multipart object metadata +type ObjectMultipartStat struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + Size int64 + + // Upload ID that identifies the multipart upload. 
+	UploadID string `xml:"UploadId"`
+}
+
+// regions - s3 region map used by bucket location constraint
+var regions = map[string]string{
+	"s3-fips-us-gov-west-1.amazonaws.com": "us-gov-west-1",
+	"s3.amazonaws.com":                    "us-east-1",
+	"s3-external-1.amazonaws.com":         "us-east-1",
+	"s3-us-west-1.amazonaws.com":          "us-west-1",
+	"s3-us-west-2.amazonaws.com":          "us-west-2",
+	"s3-eu-west-1.amazonaws.com":          "eu-west-1",
+	"s3-eu-central-1.amazonaws.com":       "eu-central-1",
+	"s3-ap-southeast-1.amazonaws.com":     "ap-southeast-1",
+	"s3-ap-southeast-2.amazonaws.com":     "ap-southeast-2",
+	"s3-ap-northeast-1.amazonaws.com":     "ap-northeast-1",
+	"s3-sa-east-1.amazonaws.com":          "sa-east-1",
+	"s3.cn-north-1.amazonaws.com.cn":      "cn-north-1",
+}
+
+// getRegion returns a region based on its endpoint mapping.
+func getRegion(host string) (region string) {
+	if _, ok := regions[host]; ok {
+		return regions[host]
+	}
+	// Region cannot be empty according to Amazon S3.
+	// So we address all the four quadrants of our galaxy.
+	return "milkyway"
+}
+
+// Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.
+type Config struct {
+	// Standard options
+	AccessKeyID     string
+	SecretAccessKey string
+	Endpoint        string
+
+	// Advanced options
+	// Specify this to get server response in non-XML style if server supports it
+	AcceptType string
+	// Optional field. If empty, region is determined automatically.
+	Region string
+
+	// Expert options
+	//
+	// Set this to override default transport ``http.DefaultTransport``
+	//
+	// This transport is usually needed for debugging OR to add your own
+	// custom TLS certificates on the client transport, for custom CA's and
+	// certs which are not part of standard certificate authority
+	//
+	// For example :-
+	//
+	//  tr := &http.Transport{
+	//          TLSClientConfig:    &tls.Config{RootCAs: pool},
+	//          DisableCompression: true,
+	//  }
+	//
+	Transport http.RoundTripper
+
+	// internal
+	// use SetUserAgent to append to the default, useful when minio-go is used within your application
+	userAgent      string
+	isUserAgentSet bool // allow user agent to be set only once
+	isVirtualStyle bool // set when virtual hostnames are on
+}
+
+// Global constants
+const (
+	LibraryName    = "minio-go"
+	LibraryVersion = "0.2.5"
+)
+
+// SetUserAgent - append to a default user agent
+func (c *Config) SetUserAgent(name string, version string, comments ...string) {
+	if c.isUserAgentSet {
+		// if user agent already set do not set it
+		return
+	}
+	// if no name and version is set we do not add new user agents
+	if name != "" && version != "" {
+		c.userAgent = c.userAgent + " " + name + "/" + version + " (" + strings.Join(comments, "; ") + ") "
+		c.isUserAgentSet = true
+	}
+}
+
+type api struct {
+	apiCore
+}
+
+// New - instantiate a new minio api client
+func New(config Config) (API, error) {
+	if strings.TrimSpace(config.Region) == "" {
+		u, err := url.Parse(config.Endpoint)
+		if err != nil {
+			return api{}, err
+		}
+		match, _ := filepath.Match("*.s3*.amazonaws.com", u.Host)
+		if match {
+			config.isVirtualStyle = true
+			hostSplits := strings.SplitN(u.Host, ".", 2)
+			u.Host = hostSplits[1]
+		}
+		config.Region = getRegion(u.Host)
+	}
+	config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH)
+	config.isUserAgentSet = false // default
+	return api{apiCore{&config}}, nil
+}
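For reference, a minimal client-construction sketch mirroring the examples shipped under examples/s3 in this patch; the endpoint, access key, and secret key below are placeholders, not values from the patch:

	package main

	import (
		"fmt"
		"log"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder endpoint and credentials for illustration only.
		api, err := minio.New(minio.Config{
			Endpoint:        "https://s3.amazonaws.com",
			AccessKeyID:     "YOUR-ACCESS-KEY",
			SecretAccessKey: "YOUR-SECRET-KEY",
		})
		if err != nil {
			log.Fatal(err)
		}
		// ListBuckets streams results over a channel; each message carries
		// either a BucketStat or an error.
		for message := range api.ListBuckets() {
			if message.Err != nil {
				log.Fatal(message.Err)
			}
			fmt.Println(message.Stat.Name, message.Stat.CreationDate)
		}
	}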
+
+/// Object operations
+
+/// Expires maximum is 7 days, i.e. 604800 seconds; minimum is 1 second.
+
+// PresignedPutObject get a presigned URL to upload an object
+func (a api) PresignedPutObject(bucket, object string, expires time.Duration) (string, error) {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 || expireSeconds > 604800 {
+		return "", invalidArgumentError("")
+	}
+	return a.presignedPutObject(bucket, object, expireSeconds)
+}
+
+// PresignedGetObject get a presigned URL to retrieve an object for third party apps
+func (a api) PresignedGetObject(bucket, object string, expires time.Duration) (string, error) {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 || expireSeconds > 604800 {
+		return "", invalidArgumentError("")
+	}
+	return a.presignedGetObject(bucket, object, expireSeconds, 0, 0)
+}
+
+// GetObject retrieve object
+//
+// Downloads the full object with no ranges; if you need ranges use GetPartialObject
+func (a api) GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error) {
+	if err := invalidBucketError(bucket); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	if err := invalidObjectError(object); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	// get object
+	return a.getObject(bucket, object, 0, 0)
+}
+
+// GetPartialObject retrieve partial object
+//
+// Takes range arguments to download the specified range bytes of an object.
+// Setting offset and length = 0 will download the full object.
+// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (a api) GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
+	if err := invalidBucketError(bucket); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	if err := invalidObjectError(object); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	// get partial object
+	return a.getObject(bucket, object, offset, length)
+}
+
+// completedParts is a wrapper to make parts sortable by their part number
+// multi part completion requires list of multi parts to be sorted
+type completedParts []completePart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// minimumPartSize minimum part size per object after which PutObject behaves internally as multipart
+var minimumPartSize int64 = 1024 * 1024 * 5
+
+// maxParts - unexported right now
+var maxParts = int64(10000)
+
+// maxPartSize - unexported right now
+var maxPartSize int64 = 1024 * 1024 * 1024 * 5
+
+// maxConcurrentQueue - max concurrent upload queue
+var maxConcurrentQueue int64 = 4
+
+// calculatePartSize - calculate the optimal part size for the given objectSize
+//
+// NOTE: Assumption here is that for any given object upload to a S3 compatible object
+// storage it will have the following parameters as constants
+//
+//  maxParts
+//  maxPartSize
+//  minimumPartSize
+//
+// if the partSize after division with maxParts is greater than minimumPartSize
+// then choose that to be the new part size, if not return minimumPartSize
+//
+// special case where it happens to be that partSize is indeed bigger than the
+// maximum part size just return maxPartSize back
+func calculatePartSize(objectSize int64) int64 {
+	// make sure last part has enough buffer and handle this properly
+	partSize := (objectSize / (maxParts - 1))
+	if partSize > minimumPartSize {
+		if partSize > maxPartSize {
+			return maxPartSize
+		}
+		return partSize
+	}
+	return minimumPartSize
+}
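To make the arithmetic concrete, an illustrative package-internal sketch (editorial, not part of the patch) with worked values; all sizes are in bytes and the division is integer division by maxParts-1 = 9999:

	// examplePartSizes - worked examples of calculatePartSize.
	func examplePartSizes() []int64 {
		return []int64{
			calculatePartSize(10 * 1024 * 1024 * 1024),        // 10 GiB:  10737418240/9999 = 1073849, below the floor, so 5242880 (5 MiB)
			calculatePartSize(100 * 1024 * 1024 * 1024),       // 100 GiB: 107374182400/9999 = 10738492 (~10.7 MB parts)
			calculatePartSize(60 * 1000 * 1000 * 1000 * 1000), // 60 TB:   6000600060 exceeds maxPartSize, capped at 5368709120 (5 GiB)
		}
	}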
+
+func (a api) newObjectUpload(bucket, object, contentType string, size int64, data io.Reader) error {
+	initMultipartUploadResult, err := a.initiateMultipartUpload(bucket, object)
+	if err != nil {
+		return err
+	}
+	uploadID := initMultipartUploadResult.UploadID
+	complMultipartUpload := completeMultipartUpload{}
+	var totalLength int64
+
+	// Calculate optimal part size for a given size
+	partSize := calculatePartSize(size)
+	// Allocate buffered error channel for maximum parts
+	errCh := make(chan error, maxParts)
+	// Limit multipart queue size to the concurrency limit
+	mpQueueCh := make(chan struct{}, maxConcurrentQueue)
+	defer close(errCh)
+	defer close(mpQueueCh)
+	// Allocate a new wait group
+	wg := new(sync.WaitGroup)
+
+	for p := range chopper(data, partSize, nil) {
+		// This check is primarily for last part
+		// This verifies if the part.Len was an unexpected read, i.e. if we lost a few bytes
+		if p.Len < partSize && size > 0 {
+			expectedPartLen := size - totalLength
+			if expectedPartLen != p.Len {
+				return ErrorResponse{
+					Code:     "UnexpectedShortRead",
+					Message:  "Data read ‘" + strconv.FormatInt(p.Len, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(expectedPartLen, 10) + "’",
+					Resource: separator + bucket + separator + object,
+				}
+			}
+		}
+		// Limit to 4 parts at a given time
+		mpQueueCh <- struct{}{}
+		// Account for all parts uploaded simultaneously
+		wg.Add(1)
+		go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) {
+			defer wg.Done()
+			defer func() {
+				<-mpQueueCh
+			}()
+			if p.Err != nil {
+				errCh <- p.Err
+				return
+			}
+			var complPart completePart
+			complPart, err = a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker)
+			if err != nil {
+				errCh <- err
+				return
+			}
+			complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
+			errCh <- nil
+		}(errCh, mpQueueCh, p)
+		totalLength += p.Len
+	}
+	wg.Wait()
+	if err := <-errCh; err != nil {
+		return err
+	}
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+	_, err = a.completeMultipartUpload(bucket, object, uploadID, complMultipartUpload)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+type partCh struct {
+	Metadata partMetadata
+	Err      error
+}
+
+func (a api) listObjectPartsRecursive(bucket, object, uploadID string) <-chan partCh {
+	partCh := make(chan partCh, 1000)
+	go a.listObjectPartsRecursiveInRoutine(bucket, object, uploadID, partCh)
+	return partCh
+}
+
+func (a api) listObjectPartsRecursiveInRoutine(bucket, object, uploadID string, ch chan partCh) {
+	defer close(ch)
+	listObjPartsResult, err := a.listObjectParts(bucket, object, uploadID, 0, 1000)
+	if err != nil {
+		ch <- partCh{
+			Metadata: partMetadata{},
+			Err:      err,
+		}
+		return
+	}
+	for _, uploadedPart := range listObjPartsResult.Parts {
+		ch <- partCh{
+			Metadata: uploadedPart,
+			Err:      nil,
+		}
+	}
+	for {
+		if !listObjPartsResult.IsTruncated {
+			break
+		}
+		listObjPartsResult, err = a.listObjectParts(bucket, object, uploadID, listObjPartsResult.NextPartNumberMarker, 1000)
+		if err != nil {
+			ch <- partCh{
+				Metadata: partMetadata{},
+				Err:      err,
+			}
+			return
+		}
+		for _, uploadedPart := range listObjPartsResult.Parts {
+			ch <- partCh{
+				Metadata: uploadedPart,
+				Err:      nil,
+			}
+		}
+	}
+}
+
+func (a api) getMultipartSize(bucket, object, uploadID string) (int64, error) {
+	var size int64
+	for part := range a.listObjectPartsRecursive(bucket, object, uploadID) {
+		if part.Err != nil {
+			return 0, part.Err
+		}
+		size += part.Metadata.Size
+	}
+	return size, nil
+}
+
+func (a api) continueObjectUpload(bucket, object, uploadID string, size int64, data io.Reader) error {
+	var skipParts []skipPart
+	completeMultipartUpload := completeMultipartUpload{}
+	var totalLength int64
+	for part := range a.listObjectPartsRecursive(bucket, object, uploadID) {
+		if part.Err != nil {
+			return part.Err
+		}
+		var completedPart completePart
+		completedPart.PartNumber = part.Metadata.PartNumber
+		completedPart.ETag = part.Metadata.ETag
+		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart)
+		md5SumBytes, err := hex.DecodeString(strings.Trim(part.Metadata.ETag, "\"")) // trim off the odd double quotes
+		if err != nil {
+			return err
+		}
+		totalLength += part.Metadata.Size
+		skipParts = append(skipParts, skipPart{
+			md5sum:     md5SumBytes,
+			partNumber: part.Metadata.PartNumber,
+		})
+	}
+
+	// Calculate the optimal part size for a given size
+	partSize := calculatePartSize(size)
+	// Allocate buffered error channel for maximum parts
+	errCh := make(chan error, maxParts)
+	// Limit multipart queue size to the concurrency limit
+	mpQueueCh := make(chan struct{}, maxConcurrentQueue)
+	defer close(errCh)
+	defer close(mpQueueCh)
+	// Allocate a new wait group
+	wg := new(sync.WaitGroup)
+
+	for p := range chopper(data, partSize, skipParts) {
+		// This check is primarily for last part
+		// This verifies if the part.Len was an unexpected read, i.e. if we lost a few bytes
+		if p.Len < partSize && size > 0 {
+			expectedPartLen := size - totalLength
+			if expectedPartLen != p.Len {
+				return ErrorResponse{
+					Code:     "UnexpectedShortRead",
+					Message:  "Data read ‘" + strconv.FormatInt(p.Len, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(expectedPartLen, 10) + "’",
+					Resource: separator + bucket + separator + object,
+				}
+			}
+		}
+		// Limit to 4 parts at a given time
+		mpQueueCh <- struct{}{}
+		// Account for all parts uploaded simultaneously
+		wg.Add(1)
+		go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) {
+			defer wg.Done()
+			defer func() {
+				<-mpQueueCh
+			}()
+			if p.Err != nil {
+				errCh <- p.Err
+				return
+			}
+			completedPart, err := a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker)
+			if err != nil {
+				errCh <- err
+				return
+			}
+			completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart)
+			errCh <- nil
+		}(errCh, mpQueueCh, p)
+		totalLength += p.Len
+	}
+	wg.Wait()
+	if err := <-errCh; err != nil {
+		return err
+	}
+	sort.Sort(completedParts(completeMultipartUpload.Parts))
+	_, err := a.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// PresignedPostPolicy return POST form data that can be used for object upload
+func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
+	if p.expiration.IsZero() {
+		return nil, errors.New("expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, errors.New("bucket name must be specified")
+	}
+	return a.presignedPostPolicy(p), nil
+}
+
+// PutObject create an object in a bucket
+//
+// You must have WRITE permissions on a bucket to create an object
+//
+// This version of PutObject automatically does multipart for more than 5MB worth of data
+func (a api) PutObject(bucket, object, contentType string, size int64, data io.Reader) error {
+	if err := invalidBucketError(bucket); err != nil {
+		return err
+	}
+	if err := invalidArgumentError(object); err != nil {
+		return err
+	}
+	// for un-authenticated requests do not initiate multipart operation
+	//
+	// NOTE: this behavior is only kept valid for S3, since S3 doesn't
+	// allow unauthenticated multipart requests
+	if a.config.Region != "milkyway" {
+		if a.config.AccessKeyID == "" || a.config.SecretAccessKey == "" {
+			_, err := a.putObjectUnAuthenticated(bucket, object, contentType, size, data)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+	}
+	switch {
+	case size < minimumPartSize && size > 0:
+		// Single Part use case, use PutObject directly
+		for part := range chopper(data, minimumPartSize, nil) {
+			if part.Err != nil {
+				return part.Err
+			}
+			// This verifies if the part.Len was an unexpected read, i.e. if we lost a few bytes
+			if part.Len != size {
+				return ErrorResponse{
+					Code:     "MethodUnexpectedEOF",
+					Message:  "Data read is less than the requested size",
+					Resource: separator + bucket + separator + object,
+				}
+			}
+			_, err := a.putObject(bucket, object, contentType, part.MD5Sum, part.Len, part.ReadSeeker)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+	default:
+		var inProgress bool
+		var inProgressUploadID string
+		for mpUpload := range a.listMultipartUploadsRecursive(bucket, object) {
+			if mpUpload.Err != nil {
+				return mpUpload.Err
+			}
+			if mpUpload.Metadata.Key == object {
+				inProgress = true
+				inProgressUploadID = mpUpload.Metadata.UploadID
+				break
+			}
+		}
+		if !inProgress {
+			return a.newObjectUpload(bucket, object, contentType, size, data)
+		}
+		return a.continueObjectUpload(bucket, object, inProgressUploadID, size, data)
+	}
+	return errors.New("Unexpected control flow, please report this error at https://github.com/minio/minio-go/issues")
+}
+
+// StatObject verify if object exists and you have permission to access it
+func (a api) StatObject(bucket, object string) (ObjectStat, error) {
+	if err := invalidBucketError(bucket); err != nil {
+		return ObjectStat{}, err
+	}
+	if err := invalidObjectError(object); err != nil {
+		return ObjectStat{}, err
+	}
+	return a.headObject(bucket, object)
+}
+
+// RemoveObject remove the object from a bucket
+func (a api) RemoveObject(bucket, object string) error {
+	if err := invalidBucketError(bucket); err != nil {
+		return err
+	}
+	if err := invalidObjectError(object); err != nil {
+		return err
+	}
+	return a.deleteObject(bucket, object)
+}
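Since PutObject hides the single-PUT versus multipart split behind one call, a minimal end-to-end usage sketch may help; the endpoint, credentials, bucket, and object names below are placeholders, not values from the patch:

	package main

	import (
		"bytes"
		"log"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder endpoint and credentials for illustration only.
		api, err := minio.New(minio.Config{
			Endpoint:        "https://s3.amazonaws.com",
			AccessKeyID:     "YOUR-ACCESS-KEY",
			SecretAccessKey: "YOUR-SECRET-KEY",
		})
		if err != nil {
			log.Fatal(err)
		}
		payload := []byte("Hello, World")
		// Below the 5 MiB minimumPartSize this is a single PUT; anything
		// larger is transparently uploaded (or resumed) as a multipart upload.
		err = api.PutObject("mybucket", "myobject", "application/octet-stream",
			int64(len(payload)), bytes.NewReader(payload))
		if err != nil {
			log.Fatal(err)
		}
	}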
+
+/// Bucket operations
+
+// MakeBucket make a new bucket
+//
+// optional arguments are acl and location - by default all buckets are created
+// with ``private`` acl and location set to US Standard. If one wishes to set
+// different ACLs and Location one can set them properly.
+//
+// ACL valid values
+//
+//  private - owner gets full access [default]
+//  public-read - owner gets full access, all others get read access
+//  public-read-write - owner gets full access, all others get full access too
+//  authenticated-read - owner gets full access, authenticated users get read access
+//
+// Location valid values which are automatically derived from config endpoint
+//
+//  [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]
+//  Default - US Standard
+func (a api) MakeBucket(bucket string, acl BucketACL) error {
+	if err := invalidBucketError(bucket); err != nil {
+		return err
+	}
+	if !acl.isValidBucketACL() {
+		return invalidArgumentError("")
+	}
+	location := a.config.Region
+	if location == "milkyway" {
+		location = ""
+	}
+	if location == "us-east-1" {
+		location = ""
+	}
+	return a.putBucket(bucket, string(acl), location)
+}
+
+// SetBucketACL set the permissions on an existing bucket using access control lists (ACL)
+//
+// For example
+//
+//  private - owner gets full access [default]
+//  public-read - owner gets full access, all others get read access
+//  public-read-write - owner gets full access, all others get full access too
+//  authenticated-read - owner gets full access, authenticated users get read access
+//
+func (a api) SetBucketACL(bucket string, acl BucketACL) error {
+	if err := invalidBucketError(bucket); err != nil {
+		return err
+	}
+	if !acl.isValidBucketACL() {
+		return invalidArgumentError("")
+	}
+	return a.putBucketACL(bucket, string(acl))
+}
+
+// GetBucketACL get the permissions on an existing bucket
+//
+// Returned values are:
+//
+//  private - owner gets full access
+//  public-read - owner gets full access, others get read access
+//  public-read-write - owner gets full access, others get full access too
+//  authenticated-read - owner gets full access, authenticated users get read access
+//
+func (a api) GetBucketACL(bucket string) (BucketACL, error) {
+	if err := invalidBucketError(bucket); err != nil {
+		return "", err
+	}
+	policy, err := a.getBucketACL(bucket)
+	if err != nil {
+		return "", err
+	}
+	grants := policy.AccessControlList.Grant
+	switch {
+	case len(grants) == 1:
+		if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
+			return BucketACL("private"), nil
+		}
+	case len(grants) == 2:
+		for _, g := range grants {
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+				return BucketACL("authenticated-read"), nil
+			}
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+				return BucketACL("public-read"), nil
+			}
+		}
+	case len(grants) == 3:
+		for _, g := range grants {
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+				return BucketACL("public-read-write"), nil
+			}
+		}
+	}
+	return "", ErrorResponse{
+		Code:      "NoSuchBucketPolicy",
+		Message:   "The specified bucket does not have a bucket policy.",
+		Resource:  "/" + bucket,
+		RequestID: "minio",
+	}
+}
+
+// BucketExists verify if bucket exists and you have permission to access it
+func (a api) BucketExists(bucket string) error {
+	if err := invalidBucketError(bucket); err != nil {
+		return err
+	}
+	return a.headBucket(bucket)
+}
+
+// RemoveBucket deletes the bucket named in the URI
+//
+// NOTE:
+//  All objects (including all object versions and delete markers)
+//  in the bucket must be deleted before successfully attempting this request.
+func (a api) RemoveBucket(bucket string) error { + if err := invalidBucketError(bucket); err != nil { + return err + } + return a.deleteBucket(bucket) +} + +type multiPartUploadCh struct { + Metadata ObjectMultipartStat + Err error +} + +func (a api) listMultipartUploadsRecursive(bucket, object string) <-chan multiPartUploadCh { + ch := make(chan multiPartUploadCh, 1000) + go a.listMultipartUploadsRecursiveInRoutine(bucket, object, ch) + return ch +} + +func (a api) listMultipartUploadsRecursiveInRoutine(bucket, object string, ch chan multiPartUploadCh) { + defer close(ch) + listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000) + if err != nil { + ch <- multiPartUploadCh{ + Metadata: ObjectMultipartStat{}, + Err: err, + } + return + } + for _, multiPartUpload := range listMultipartUplResult.Uploads { + ch <- multiPartUploadCh{ + Metadata: multiPartUpload, + Err: nil, + } + } + for { + if !listMultipartUplResult.IsTruncated { + break + } + listMultipartUplResult, err = a.listMultipartUploads(bucket, + listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000) + if err != nil { + ch <- multiPartUploadCh{ + Metadata: ObjectMultipartStat{}, + Err: err, + } + return + } + for _, multiPartUpload := range listMultipartUplResult.Uploads { + ch <- multiPartUploadCh{ + Metadata: multiPartUpload, + Err: nil, + } + } + } +} + +// listIncompleteUploadsInRoutine is an internal goroutine function called for listing objects +// This function feeds data into channel +func (a api) listIncompleteUploadsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectMultipartStatCh) { + defer close(ch) + if err := invalidBucketError(bucket); err != nil { + ch <- ObjectMultipartStatCh{ + Stat: ObjectMultipartStat{}, + Err: err, + } + return + } + switch { + case recursive == true: + var multipartMarker string + var uploadIDMarker string + for { + result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "", 1000) + if err != nil { + ch <- ObjectMultipartStatCh{ + Stat: ObjectMultipartStat{}, + Err: err, + } + return + } + for _, objectSt := range result.Uploads { + objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID) + if err != nil { + ch <- ObjectMultipartStatCh{ + Stat: ObjectMultipartStat{}, + Err: err, + } + } + ch <- ObjectMultipartStatCh{ + Stat: objectSt, + Err: nil, + } + multipartMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + } + if !result.IsTruncated { + break + } + } + default: + var multipartMarker string + var uploadIDMarker string + for { + result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "/", 1000) + if err != nil { + ch <- ObjectMultipartStatCh{ + Stat: ObjectMultipartStat{}, + Err: err, + } + return + } + multipartMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + for _, objectSt := range result.Uploads { + objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID) + if err != nil { + ch <- ObjectMultipartStatCh{ + Stat: ObjectMultipartStat{}, + Err: err, + } + } + ch <- ObjectMultipartStatCh{ + Stat: objectSt, + Err: nil, + } + } + for _, prefix := range result.CommonPrefixes { + object := ObjectMultipartStat{} + object.Key = prefix.Prefix + object.Size = 0 + ch <- ObjectMultipartStatCh{ + Stat: object, + Err: nil, + } + } + if !result.IsTruncated { + break + } + } + } +} + +// ListIncompleteUploads - (List incompletely uploaded multipart objects) 
- List some multipart objects or all recursively
+//
+// ListIncompleteUploads is a channel based API implemented to facilitate ease of usage of S3 API ListMultipartUploads()
+// by automatically recursively traversing all multipart objects on a given bucket if specified.
+//
+// Your input parameters are just bucket, prefix and recursive
+//
+// If you enable recursive as 'true' this function will return back all the multipart objects in a given bucket
+//
+// eg:-
+// api := client.New(....)
+// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", true) {
+//      fmt.Println(message.Stat)
+// }
+//
+func (a api) ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh {
+	ch := make(chan ObjectMultipartStatCh, 1000)
+	go a.listIncompleteUploadsInRoutine(bucket, prefix, recursive, ch)
+	return ch
+}
+
+// listObjectsInRoutine is an internal goroutine function called for listing objects
+// This function feeds data into channel
+func (a api) listObjectsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectStatCh) {
+	defer close(ch)
+	if err := invalidBucketError(bucket); err != nil {
+		ch <- ObjectStatCh{
+			Stat: ObjectStat{},
+			Err:  err,
+		}
+		return
+	}
+	switch {
+	case recursive == true:
+		var marker string
+		for {
+			result, err := a.listObjects(bucket, marker, prefix, "", 1000)
+			if err != nil {
+				ch <- ObjectStatCh{
+					Stat: ObjectStat{},
+					Err:  err,
+				}
+				return
+			}
+			for _, object := range result.Contents {
+				ch <- ObjectStatCh{
+					Stat: object,
+					Err:  nil,
+				}
+				marker = object.Key
+			}
+			if !result.IsTruncated {
+				break
+			}
+		}
+	default:
+		var marker string
+		for {
+			result, err := a.listObjects(bucket, marker, prefix, "/", 1000)
+			if err != nil {
+				ch <- ObjectStatCh{
+					Stat: ObjectStat{},
+					Err:  err,
+				}
+				return
+			}
+			marker = result.NextMarker
+			for _, object := range result.Contents {
+				ch <- ObjectStatCh{
+					Stat: object,
+					Err:  nil,
+				}
+			}
+			for _, prefix := range result.CommonPrefixes {
+				object := ObjectStat{}
+				object.Key = prefix.Prefix
+				object.Size = 0
+				ch <- ObjectStatCh{
+					Stat: object,
+					Err:  nil,
+				}
+			}
+			if !result.IsTruncated {
+				break
+			}
+		}
+	}
+}
+
+// ListObjects - (List Objects) - List some objects or all recursively
+//
+// ListObjects is a channel based API implemented to facilitate ease of usage of S3 API ListObjects()
+// by automatically recursively traversing all objects on a given bucket if specified.
+//
+// Your input parameters are just bucket, prefix and recursive
+//
+// If you enable recursive as 'true' this function will return back all the objects in a given bucket
+//
+// eg:-
+// api := client.New(....)
+// for message := range api.ListObjects("mytestbucket", "starthere", true) {
+//      fmt.Println(message.Stat)
+// }
+//
+func (a api) ListObjects(bucket string, prefix string, recursive bool) <-chan ObjectStatCh {
+	ch := make(chan ObjectStatCh, 1000)
+	go a.listObjectsInRoutine(bucket, prefix, recursive, ch)
+	return ch
+}
+
+// listBucketsInRoutine is an internal goroutine function called for listing buckets
+// This function feeds data into channel
+func (a api) listBucketsInRoutine(ch chan BucketStatCh) {
+	defer close(ch)
+	listAllMyBucketListResults, err := a.listBuckets()
+	if err != nil {
+		ch <- BucketStatCh{
+			Stat: BucketStat{},
+			Err:  err,
+		}
+		return
+	}
+	for _, bucket := range listAllMyBucketListResults.Buckets.Bucket {
+		ch <- BucketStatCh{
+			Stat: bucket,
+			Err:  nil,
+		}
+	}
+}
+
+// ListBuckets list of all buckets owned by the authenticated sender of the request
+//
+// NOTE:
+//     This call requires explicit authentication, no anonymous
+//     requests are allowed for listing buckets
+//
+// eg:-
+// api := client.New(....)
+// for message := range api.ListBuckets() {
+//      fmt.Println(message.Stat)
+// }
+//
+func (a api) ListBuckets() <-chan BucketStatCh {
+	ch := make(chan BucketStatCh, 100)
+	go a.listBucketsInRoutine(ch)
+	return ch
+}
+
+func (a api) removeIncompleteUploadInRoutine(bucket, object string, errorCh chan error) {
+	defer close(errorCh)
+	if err := invalidBucketError(bucket); err != nil {
+		errorCh <- err
+		return
+	}
+	if err := invalidObjectError(object); err != nil {
+		errorCh <- err
+		return
+	}
+	listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000)
+	if err != nil {
+		errorCh <- err
+		return
+	}
+	for _, multiPartUpload := range listMultipartUplResult.Uploads {
+		if object == multiPartUpload.Key {
+			err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID)
+			if err != nil {
+				errorCh <- err
+				return
+			}
+			return
+		}
+	}
+	for {
+		if !listMultipartUplResult.IsTruncated {
+			break
+		}
+		listMultipartUplResult, err = a.listMultipartUploads(bucket,
+			listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000)
+		if err != nil {
+			errorCh <- err
+			return
+		}
+		for _, multiPartUpload := range listMultipartUplResult.Uploads {
+			if object == multiPartUpload.Key {
+				err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID)
+				if err != nil {
+					errorCh <- err
+					return
+				}
+				return
+			}
+		}
+	}
+}
+
+// RemoveIncompleteUpload - abort a specific in-progress active multipart upload
+// requires explicit authentication, no anonymous requests are allowed for multipart API
+func (a api) RemoveIncompleteUpload(bucket, object string) <-chan error {
+	errorCh := make(chan error)
+	go a.removeIncompleteUploadInRoutine(bucket, object, errorCh)
+	return errorCh
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go
new file mode 100644
index 000000000..146f4d6e1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go
@@ -0,0 +1,170 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio_test + +// bucketHandler is an http.Handler that verifies bucket responses and validates incoming requests +import ( + "bytes" + "io" + "net/http" + "strconv" + "time" +) + +type bucketHandler struct { + resource string +} + +func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == "GET": + switch { + case r.URL.Path == "/": + response := []byte("bucket2015-05-20T23:05:09.230Zminiominio") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + case r.URL.Path == "/bucket": + _, ok := r.URL.Query()["acl"] + if ok { + response := []byte("75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.com75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.comFULL_CONTROL") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + return + } + fallthrough + case r.URL.Path == "/bucket": + response := []byte("\"259d04a13802ae09c7e41be50ccc6baa\"object2015-05-21T18:24:21.097Z22061miniominioSTANDARDfalse1000testbucket") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + } + case r.Method == "PUT": + switch { + case r.URL.Path == h.resource: + _, ok := r.URL.Query()["acl"] + if ok { + switch r.Header.Get("x-amz-acl") { + case "public-read-write": + fallthrough + case "public-read": + fallthrough + case "private": + fallthrough + case "authenticated-read": + w.WriteHeader(http.StatusOK) + return + default: + w.WriteHeader(http.StatusNotImplemented) + return + } + } + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusBadRequest) + } + case r.Method == "HEAD": + switch { + case r.URL.Path == h.resource: + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusForbidden) + } + case r.Method == "DELETE": + switch { + case r.URL.Path != h.resource: + w.WriteHeader(http.StatusNotFound) + default: + h.resource = "" + w.WriteHeader(http.StatusNoContent) + } + } +} + +// objectHandler is an http.Handler that verifies object responses and validates incoming requests +type objectHandler struct { + resource string + data []byte +} + +func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == "PUT": + length, err := strconv.Atoi(r.Header.Get("Content-Length")) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + var buffer bytes.Buffer + _, err = io.CopyN(&buffer, r.Body, int64(length)) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + if !bytes.Equal(h.data, buffer.Bytes()) { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") + w.WriteHeader(http.StatusOK) + case r.Method == "HEAD": + if r.URL.Path != h.resource { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(h.data))) + w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) + w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") + 
w.WriteHeader(http.StatusOK) + case r.Method == "POST": + _, ok := r.URL.Query()["uploads"] + if ok { + response := []byte("example-bucketobjectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + return + } + case r.Method == "GET": + _, ok := r.URL.Query()["uploadId"] + if ok { + uploadID := r.URL.Query().Get("uploadId") + if uploadID != "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA" { + w.WriteHeader(http.StatusNotFound) + return + } + response := []byte("example-bucketexample-objectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZAarn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xxumat-user-11116a31-17b5-4fb7-9df5-b288870f11xx75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06asomeNameSTANDARD132true22010-11-10T20:48:34.000Z\"7778aef83f66abc1fa1e8477f296d394\"1048576032010-11-10T20:48:33.000Z\"aaaa18db4cc2f85cedef654fccc4a4x8\"10485760") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + return + } + if r.URL.Path != h.resource { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(h.data))) + w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) + w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") + w.WriteHeader(http.StatusOK) + io.Copy(w, bytes.NewReader(h.data)) + case r.Method == "DELETE": + if r.URL.Path != h.resource { + w.WriteHeader(http.StatusNotFound) + return + } + h.resource = "" + h.data = nil + w.WriteHeader(http.StatusNoContent) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go new file mode 100644 index 000000000..b220744b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go @@ -0,0 +1,110 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "strings" + "testing" +) + +func TestACLTypes(t *testing.T) { + want := map[string]bool{ + "private": true, + "public-read": true, + "public-read-write": true, + "authenticated-read": true, + "invalid": false, + } + for acl, ok := range want { + if BucketACL(acl).isValidBucketACL() != ok { + t.Fatal("Error") + } + } +} + +func TestUserAgent(t *testing.T) { + conf := new(Config) + conf.SetUserAgent("minio", "1.0", "amd64") + if !strings.Contains(conf.userAgent, "minio") { + t.Fatalf("Error") + } +} + +func TestGetRegion(t *testing.T) { + region := getRegion("s3.amazonaws.com") + if region != "us-east-1" { + t.Fatalf("Error") + } + region = getRegion("localhost:9000") + if region != "milkyway" { + t.Fatalf("Error") + } +} + +func TestPartSize(t *testing.T) { + var maxPartSize int64 = 1024 * 1024 * 1024 * 5 + partSize := calculatePartSize(5000000000000000000) + if partSize > minimumPartSize { + if partSize > maxPartSize { + t.Fatal("invalid result, cannot be bigger than maxPartSize 5GB") + } + } + partSize = calculatePartSize(50000000000) + if partSize > minimumPartSize { + t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MB") + } +} + +func TestURLEncoding(t *testing.T) { + type urlStrings struct { + name string + encodedName string + } + + want := []urlStrings{ + { + name: "bigfile-1._%", + encodedName: "bigfile-1._%25", + }, + { + name: "本語", + encodedName: "%E6%9C%AC%E8%AA%9E", + }, + { + name: "本語.1", + encodedName: "%E6%9C%AC%E8%AA%9E.1", + }, + { + name: ">123>3123123", + encodedName: "%3E123%3E3123123", + }, + { + name: "test 1 2.txt", + encodedName: "test%201%202.txt", + }, + { + name: "test++ 1.txt", + encodedName: "test%2B%2B%201.txt", + }, + } + + for _, u := range want { + if u.encodedName != getURLEncodedPath(u.name) { + t.Errorf("Error") + } + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go new file mode 100644 index 000000000..cf82c4812 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go @@ -0,0 +1,287 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio_test + +import ( + "bytes" + "io" + "net/http/httptest" + "testing" + "time" + + "github.com/minio/minio-go" +) + +func TestBucketOperations(t *testing.T) { + bucket := bucketHandler(bucketHandler{ + resource: "/bucket", + }) + server := httptest.NewServer(bucket) + defer server.Close() + + a, err := minio.New(minio.Config{Endpoint: server.URL}) + if err != nil { + t.Fatal("Error") + } + err = a.MakeBucket("bucket", "private") + if err != nil { + t.Fatal("Error") + } + + err = a.BucketExists("bucket") + if err != nil { + t.Fatal("Error") + } + + err = a.BucketExists("bucket1") + if err == nil { + t.Fatal("Error") + } + if err.Error() != "Access Denied" { + t.Fatal("Error") + } + + err = a.SetBucketACL("bucket", "public-read-write") + if err != nil { + t.Fatal("Error") + } + + acl, err := a.GetBucketACL("bucket") + if err != nil { + t.Fatal("Error") + } + if acl != minio.BucketACL("private") { + t.Fatal("Error") + } + + for b := range a.ListBuckets() { + if b.Err != nil { + t.Fatal(b.Err.Error()) + } + if b.Stat.Name != "bucket" { + t.Fatal("Error") + } + } + + for o := range a.ListObjects("bucket", "", true) { + if o.Err != nil { + t.Fatal(o.Err.Error()) + } + if o.Stat.Key != "object" { + t.Fatal("Error") + } + } + + err = a.RemoveBucket("bucket") + if err != nil { + t.Fatal("Error") + } + + err = a.RemoveBucket("bucket1") + if err == nil { + t.Fatal("Error") + } + if err.Error() != "The specified bucket does not exist." { + t.Fatal("Error") + } +} + +func TestBucketOperationsFail(t *testing.T) { + bucket := bucketHandler(bucketHandler{ + resource: "/bucket", + }) + server := httptest.NewServer(bucket) + defer server.Close() + + a, err := minio.New(minio.Config{Endpoint: server.URL}) + if err != nil { + t.Fatal("Error") + } + err = a.MakeBucket("bucket$$$", "private") + if err == nil { + t.Fatal("Error") + } + + err = a.BucketExists("bucket.") + if err == nil { + t.Fatal("Error") + } + + err = a.SetBucketACL("bucket-.", "public-read-write") + if err == nil { + t.Fatal("Error") + } + + _, err = a.GetBucketACL("bucket??") + if err == nil { + t.Fatal("Error") + } + + for o := range a.ListObjects("bucket??", "", true) { + if o.Err == nil { + t.Fatal(o.Err.Error()) + } + } + + err = a.RemoveBucket("bucket??") + if err == nil { + t.Fatal("Error") + } + + if err.Error() != "The specified bucket is not valid." 
{ + t.Fatal("Error") + } +} + +func TestObjectOperations(t *testing.T) { + object := objectHandler(objectHandler{ + resource: "/bucket/object", + data: []byte("Hello, World"), + }) + server := httptest.NewServer(object) + defer server.Close() + + a, err := minio.New(minio.Config{Endpoint: server.URL}) + if err != nil { + t.Fatal("Error") + } + data := []byte("Hello, World") + err = a.PutObject("bucket", "object", "", int64(len(data)), bytes.NewReader(data)) + if err != nil { + t.Fatal("Error") + } + metadata, err := a.StatObject("bucket", "object") + if err != nil { + t.Fatal("Error") + } + if metadata.Key != "object" { + t.Fatal("Error") + } + if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" { + t.Fatal("Error") + } + + reader, metadata, err := a.GetObject("bucket", "object") + if err != nil { + t.Fatal("Error") + } + if metadata.Key != "object" { + t.Fatal("Error") + } + if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" { + t.Fatal("Error") + } + + var buffer bytes.Buffer + _, err = io.Copy(&buffer, reader) + if !bytes.Equal(buffer.Bytes(), data) { + t.Fatal("Error") + } + + err = a.RemoveObject("bucket", "object") + if err != nil { + t.Fatal("Error") + } + err = a.RemoveObject("bucket", "object1") + if err == nil { + t.Fatal("Error") + } + if err.Error() != "The specified key does not exist." { + t.Fatal("Error") + } +} + +func TestPresignedURL(t *testing.T) { + object := objectHandler(objectHandler{ + resource: "/bucket/object", + data: []byte("Hello, World"), + }) + server := httptest.NewServer(object) + defer server.Close() + + a, err := minio.New(minio.Config{Endpoint: server.URL}) + if err != nil { + t.Fatal("Error") + } + // should error out for invalid access keys + _, err = a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second) + if err == nil { + t.Fatal("Error") + } + + a, err = minio.New(minio.Config{ + Endpoint: server.URL, + AccessKeyID: "accessKey", + SecretAccessKey: "secretKey", + }) + if err != nil { + t.Fatal("Error") + } + url, err := a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second) + if err != nil { + t.Fatal("Error") + } + if url == "" { + t.Fatal("Error") + } + _, err = a.PresignedGetObject("bucket", "object", time.Duration(0)*time.Second) + if err == nil { + t.Fatal("Error") + } + _, err = a.PresignedGetObject("bucket", "object", time.Duration(604801)*time.Second) + if err == nil { + t.Fatal("Error") + } +} + +func TestErrorResponse(t *testing.T) { + errorResponse := []byte("AccessDeniedAccess Denied/mybucket/myphoto.jpgF19772218238A85AGuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD") + errorReader := bytes.NewReader(errorResponse) + err := minio.BodyToErrorResponse(errorReader, "application/xml") + if err == nil { + t.Fatal("Error") + } + if err.Error() != "Access Denied" { + t.Fatal("Error") + } + resp := minio.ToErrorResponse(err) + // valid all fields + if resp == nil { + t.Fatal("Error") + } + if resp.Code != "AccessDenied" { + t.Fatal("Error") + } + if resp.RequestID != "F19772218238A85A" { + t.Fatal("Error") + } + if resp.Message != "Access Denied" { + t.Fatal("Error") + } + if resp.Resource != "/mybucket/myphoto.jpg" { + t.Fatal("Error") + } + if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" { + t.Fatal("Error") + } + if resp.ToXML() == "" { + t.Fatal("Error") + } + if resp.ToJSON() == "" { + t.Fatal("Error") + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml 
new file mode 100644 index 000000000..1d140afd9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml @@ -0,0 +1,41 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\minio\minio-go + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - rd C:\Go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.5.1.windows-amd64.zip + - 7z x go1.5.1.windows-amd64.zip -oC:\ >nul + - go version + - go env + - go get -u github.com/golang/lint/golint + - go get -u golang.org/x/tools/cmd/vet + - go get -u github.com/fzipp/gocyclo + - go get -u github.com/remyoudompheng/go-misc/deadcode + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go vet ./... + - gofmt -s -l . + - golint github.com/minio/minio-go... + - gocyclo -over 30 . + - deadcode + - go test + - go test -race + +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go new file mode 100644 index 000000000..5718dbbd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go @@ -0,0 +1,75 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +// BucketACL - bucket level access control +type BucketACL string + +// different types of ACL's currently supported for buckets +const ( + bucketPrivate = BucketACL("private") + bucketReadOnly = BucketACL("public-read") + bucketPublic = BucketACL("public-read-write") + bucketAuthenticated = BucketACL("authenticated-read") +) + +// String printer helper +func (b BucketACL) String() string { + if string(b) == "" { + return "private" + } + return string(b) +} + +// isValidBucketACL - is provided acl string supported +func (b BucketACL) isValidBucketACL() bool { + switch true { + case b.isPrivate(): + fallthrough + case b.isReadOnly(): + fallthrough + case b.isPublic(): + fallthrough + case b.isAuthenticated(): + return true + case b.String() == "private": + // by default its "private" + return true + default: + return false + } +} + +// IsPrivate - is acl Private +func (b BucketACL) isPrivate() bool { + return b == bucketPrivate +} + +// IsPublicRead - is acl PublicRead +func (b BucketACL) isReadOnly() bool { + return b == bucketReadOnly +} + +// IsPublicReadWrite - is acl PublicReadWrite +func (b BucketACL) isPublic() bool { + return b == bucketPublic +} + +// IsAuthenticated - is acl AuthenticatedRead +func (b BucketACL) isAuthenticated() bool { + return b == bucketAuthenticated +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go b/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go new file mode 100644 index 000000000..6b2ff9a19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go @@ -0,0 +1,136 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "crypto/md5" + "io" +) + +// part - message structure for results from the MultiPart +type part struct { + MD5Sum []byte + ReadSeeker io.ReadSeeker + Err error + Len int64 + Num int // part number +} + +// skipPart - skipping uploaded parts +type skipPart struct { + md5sum []byte + partNumber int +} + +// chopper reads from io.Reader, partitions the data into chunks of given chunksize, and sends +// each chunk as io.ReadSeeker to the caller over a channel +// +// This method runs until an EOF or error occurs. If an error occurs, +// the method sends the error over the channel and returns. +// Before returning, the channel is always closed. 
+// +// additionally this function also skips list of parts if provided +func chopper(reader io.Reader, chunkSize int64, skipParts []skipPart) <-chan part { + ch := make(chan part, 3) + go chopperInRoutine(reader, chunkSize, skipParts, ch) + return ch +} + +func chopperInRoutine(reader io.Reader, chunkSize int64, skipParts []skipPart, ch chan part) { + defer close(ch) + p := make([]byte, chunkSize) + n, err := io.ReadFull(reader, p) + if err == io.EOF || err == io.ErrUnexpectedEOF { // short read, only single part return + m := md5.Sum(p[0:n]) + ch <- part{ + MD5Sum: m[:], + ReadSeeker: bytes.NewReader(p[0:n]), + Err: nil, + Len: int64(n), + Num: 1, + } + return + } + // catastrophic error send error and return + if err != nil { + ch <- part{ + ReadSeeker: nil, + Err: err, + Num: 0, + } + return + } + // send the first part + var num = 1 + md5SumBytes := md5.Sum(p) + sp := skipPart{ + partNumber: num, + md5sum: md5SumBytes[:], + } + if !isPartNumberUploaded(sp, skipParts) { + ch <- part{ + MD5Sum: md5SumBytes[:], + ReadSeeker: bytes.NewReader(p), + Err: nil, + Len: int64(n), + Num: num, + } + } + for err == nil { + var n int + p := make([]byte, chunkSize) + n, err = io.ReadFull(reader, p) + if err != nil { + if err != io.EOF && err != io.ErrUnexpectedEOF { // catastrophic error + ch <- part{ + ReadSeeker: nil, + Err: err, + Num: 0, + } + return + } + } + num++ + md5SumBytes := md5.Sum(p[0:n]) + sp := skipPart{ + partNumber: num, + md5sum: md5SumBytes[:], + } + if isPartNumberUploaded(sp, skipParts) { + continue + } + ch <- part{ + MD5Sum: md5SumBytes[:], + ReadSeeker: bytes.NewReader(p[0:n]), + Err: nil, + Len: int64(n), + Num: num, + } + + } +} + +// to verify if partNumber is part of the skip part list +func isPartNumberUploaded(part skipPart, skipParts []skipPart) bool { + for _, p := range skipParts { + if p.partNumber == part.partNumber && bytes.Equal(p.md5sum, part.md5sum) { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common.go b/Godeps/_workspace/src/github.com/minio/minio-go/common.go new file mode 100644 index 000000000..8ac854681 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/common.go @@ -0,0 +1,115 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
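The chunking logic above is what gives resumable uploads their shape: each part arrives on the channel with its MD5 sum and part number, and parts already on the server are skipped. As a rough illustration of consuming such a channel, here is a minimal, skip-free sketch against the standard library only; chop and chunk are simplified stand-ins for the library's chopper and part, not the real implementation:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

// chunk mirrors the part structure above: a number, an MD5 sum and a reader.
type chunk struct {
	num    int
	md5sum []byte
	body   io.ReadSeeker
	n      int64
}

// chop is a simplified, skip-free version of chopper, for illustration only.
func chop(r io.Reader, size int64) <-chan chunk {
	ch := make(chan chunk, 3)
	go func() {
		defer close(ch)
		for num := 1; ; num++ {
			buf := make([]byte, size)
			n, err := io.ReadFull(r, buf)
			if n > 0 {
				sum := md5.Sum(buf[:n])
				ch <- chunk{num, sum[:], bytes.NewReader(buf[:n]), int64(n)}
			}
			if err != nil {
				// io.EOF or io.ErrUnexpectedEOF ends the stream here;
				// the real chopper forwards genuine errors on the channel.
				return
			}
		}
	}()
	return ch
}

func main() {
	data := strings.NewReader("0123456789abcdef")
	for c := range chop(data, 5) {
		// A real caller would upload each chunk; here we just report it.
		fmt.Printf("part %d: %d bytes, md5 %x\n", c.num, c.n, c.md5sum)
	}
}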
+ */ + +package minio + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "encoding/xml" + "io" + "strings" + "time" +) + +// decoder provides a unified decoding method interface +type decoder interface { + Decode(v interface{}) error +} + +// acceptTypeDecoder provide decoded value in given acceptType +func acceptTypeDecoder(body io.Reader, acceptType string, v interface{}) error { + var d decoder + switch { + case acceptType == "application/xml": + d = xml.NewDecoder(body) + case acceptType == "application/json": + d = json.NewDecoder(body) + default: + d = xml.NewDecoder(body) + } + return d.Decode(v) +} + +// sum256Reader calculate sha256 sum for an input read seeker +func sum256Reader(reader io.ReadSeeker) ([]byte, error) { + h := sha256.New() + var err error + + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + for err == nil { + length := 0 + byteBuffer := make([]byte, 1024*1024) + length, err = reader.Read(byteBuffer) + byteBuffer = byteBuffer[0:length] + h.Write(byteBuffer) + } + + if err != io.EOF { + return nil, err + } + + return h.Sum(nil), nil +} + +// sum256 calculate sha256 sum for an input byte array +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getSigningKey hmac seed to calculate final signature +func getSigningKey(secret, region string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + regionbytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionbytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a service +func getScope(region string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCredential generate a credential string +func getCredential(accessKeyID, region string, t time.Time) string { + scope := getScope(region, t) + return accessKeyID + "/" + scope +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go new file mode 100644 index 000000000..a9a69db6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go @@ -0,0 +1,181 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
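getSigningKey, getSignature, getScope and getCredential together implement the AWS Signature Version 4 derivation chain: HMAC-SHA256 is applied in turn to the date, the region, the service name and the literal "aws4_request", and the resulting key signs the string-to-sign. A self-contained sketch of the same chain follows; the secret, region and string-to-sign are fabricated, and yyyymmdd is assumed to be the format string "20060102" used by the library:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 mirrors sumHMAC above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	const yyyymmdd = "20060102" // assumed to match the library's constant
	secret := "EXAMPLESECRETKEY" // made-up credentials
	region := "us-east-1"
	t := time.Date(2015, 11, 6, 0, 0, 0, 0, time.UTC)

	// Derivation chain: date -> region -> service -> "aws4_request".
	date := hmacSHA256([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
	regionKey := hmacSHA256(date, []byte(region))
	service := hmacSHA256(regionKey, []byte("s3"))
	signingKey := hmacSHA256(service, []byte("aws4_request"))

	// Illustrative string-to-sign; a real one is built from the request.
	stringToSign := "AWS4-HMAC-SHA256\n20151106T000000Z\n..."
	signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign)))
	fmt.Println("signature:", signature)
}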
+ */ + +package minio + +import ( + "encoding/xml" + "time" +) + +// listAllMyBucketsResult container for listBuckets response +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketStat + } + Owner owner +} + +// owner container for bucket owner information +type owner struct { + DisplayName string + ID string +} + +// commonPrefix container for prefix response +type commonPrefix struct { + Prefix string +} + +// listBucketResult container for listObjects response +type listBucketResult struct { + CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you have specified a delimiter + Contents []ObjectStat // Metadata about each object returned + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + Marker string + MaxKeys int64 + Name string + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Object storage lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker string + Prefix string +} + +// listMultipartUploadsResult container for ListMultipartUploads response +type listMultipartUploadsResult struct { + Bucket string + KeyMarker string + UploadIDMarker string `xml:"UploadIdMarker"` + NextKeyMarker string + NextUploadIDMarker string `xml:"NextUploadIdMarker"` + EncodingType string + MaxUploads int64 + IsTruncated bool + Uploads []ObjectMultipartStat `xml:"Upload"` + Prefix string + Delimiter string + CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you specify a delimiter +} + +// initiator container for who initiated multipart upload +type initiator struct { + ID string + DisplayName string +} + +// partMetadata container for particular part of an object +type partMetadata struct { + // Part number identifies the part. + PartNumber int + + // Date and time the part was uploaded. + LastModified time.Time + + // Entity tag returned when the part was uploaded, usually md5sum of the part + ETag string + + // Size of the uploaded part data. + Size int64 +} + +// listObjectPartsResult container for ListObjectParts response. +type listObjectPartsResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` + + Initiator initiator + Owner owner + + StorageClass string + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + + // Indicates whether the returned list of parts is truncated. + IsTruncated bool + Parts []partMetadata `xml:"Part"` + + EncodingType string +} + +// initiateMultipartUploadResult container for InitiateMultiPartUpload response. +type initiateMultipartUploadResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +// completeMultipartUploadResult container for completed multipart upload response. 
+type completeMultipartUploadResult struct {
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// completePart sub container lists individual part numbers and their md5sum, part of completeMultipartUpload.
+type completePart struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+	// Part number identifies the part.
+	PartNumber int
+	ETag       string
+}
+
+// completeMultipartUpload container for completing a multipart upload
+type completeMultipartUpload struct {
+	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+	Parts   []completePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration
+type createBucketConfiguration struct {
+	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+	Location string   `xml:"LocationConstraint"`
+}
+
+// grant container for a single ACL grant
+type grant struct {
+	Grantee struct {
+		ID           string
+		DisplayName  string
+		EmailAddress string
+		Type         string
+		URI          string
+	}
+	Permission string
+}
+
+// accessControlPolicy container for a bucket's owner and ACL grants
+type accessControlPolicy struct {
+	AccessControlList struct {
+		Grant []grant
+	}
+	Owner owner
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
new file mode 100644
index 000000000..5626cf0c6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
@@ -0,0 +1,168 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"io"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+
+<Error>
+	<Code>AccessDenied</Code>
+	<Message>Access Denied</Message>
+	<Resource>/mybucket/myphoto.jpg</Resource>
+	<RequestId>F19772218238A85A</RequestId>
+	<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+
+*/
+
+// ErrorResponse is the type error returned by some API operations.
+type ErrorResponse struct {
+	XMLName   xml.Name `xml:"Error" json:"-"`
+	Code      string
+	Message   string
+	Resource  string
+	RequestID string `xml:"RequestId"`
+	HostID    string `xml:"HostId"`
+}
+
+// ToErrorResponse returns the parsed ErrorResponse struct; if the input is nil or not an
+// ErrorResponse, the return value is nil. This function is useful when someone wants to
+// dig deeper into the error structures returned over the network.
+//
+// for example:
+//
+//	import s3 "github.com/minio/minio-go"
+//	...
+//	...
+//	..., err := s3.GetObject(...)
+//	if err != nil {
+//		resp := s3.ToErrorResponse(err)
+//		fmt.Println(resp.ToXML())
+//	}
+//	...
+//	...
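These definitions are plain containers for encoding/xml: Go field names match the S3 element names, with struct tags only where they differ (UploadId) or where a repeated element maps onto a slice (Upload, Part). A small sketch of the round trip, decoding a fabricated ListBucketResult body into a trimmed mirror of the listBucketResult container defined above:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// listBucketResult mirrors the container above, trimmed to a few fields.
type listBucketResult struct {
	Name        string
	IsTruncated bool
	Contents    []struct {
		Key  string
		Size int64
	}
}

func main() {
	// Fabricated body in the shape S3 returns for GET Bucket (List Objects).
	body := `<ListBucketResult>
	  <Name>mybucket</Name>
	  <IsTruncated>false</IsTruncated>
	  <Contents><Key>myphoto.jpg</Key><Size>1024</Size></Contents>
	  <Contents><Key>myvideo.mp4</Key><Size>2048</Size></Contents>
	</ListBucketResult>`

	var result listBucketResult
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&result); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%s truncated=%v objects=%d\n", result.Name, result.IsTruncated, len(result.Contents))
}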
+func ToErrorResponse(err error) *ErrorResponse {
+	switch err := err.(type) {
+	case ErrorResponse:
+		return &err
+	default:
+		return nil
+	}
+}
+
+// ToXML returns the error marshalled as an XML string
+func (e ErrorResponse) ToXML() string {
+	b, err := xml.Marshal(&e)
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+// ToJSON returns the error marshalled as a JSON string
+func (e ErrorResponse) ToJSON() string {
+	b, err := json.Marshal(&e)
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+// Error formats the HTTP error string, implementing the error interface
+func (e ErrorResponse) Error() string {
+	return e.Message
+}
+
+// BodyToErrorResponse returns an ErrorResponse decoded from an HTTP error body
+func BodyToErrorResponse(errBody io.Reader, acceptType string) error {
+	var errorResponse ErrorResponse
+	err := acceptTypeDecoder(errBody, acceptType, &errorResponse)
+	if err != nil {
+		return err
+	}
+	return errorResponse
+}
+
+// invalidBucketError - converts an invalid bucket name into an ErrorResponse
+func invalidBucketError(bucket string) error {
+	// verify bucket name in accordance with
+	//   - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+	isValidBucket := func(bucket string) bool {
+		if len(bucket) < 3 || len(bucket) > 63 {
+			return false
+		}
+		if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
+			return false
+		}
+		if match, _ := regexp.MatchString("\\.\\.", bucket); match {
+			return false
+		}
+		// We don't support buckets with '.' in them
+		match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
+		return match
+	}
+
+	if !isValidBucket(strings.TrimSpace(bucket)) {
+		// no resource since bucket is an empty string
+		errorResponse := ErrorResponse{
+			Code:      "InvalidBucketName",
+			Message:   "The specified bucket is not valid.",
+			RequestID: "minio",
+		}
+		return errorResponse
+	}
+	return nil
+}
+
+// invalidObjectError - converts an invalid object name into an ErrorResponse
+func invalidObjectError(object string) error {
+	if strings.TrimSpace(object) == "" {
+		// no resource since the object name is empty
+		errorResponse := ErrorResponse{
+			Code:      "NoSuchKey",
+			Message:   "The specified key does not exist.",
+			RequestID: "minio",
+		}
+		return errorResponse
+	}
+	return nil
+}
+
+// invalidArgumentError - converts an invalid argument into an ErrorResponse
+func invalidArgumentError(arg string) error {
+	errorResponse := ErrorResponse{
+		Code:      "InvalidArgument",
+		Message:   "Invalid Argument",
+		RequestID: "minio",
+	}
+	if strings.TrimSpace(arg) == "" {
+		// no resource since arg is an empty string
+		return errorResponse
+	}
+	if !utf8.ValidString(arg) {
+		// add the offending string as the resource in the reply
+		errorResponse.Resource = arg
+		return errorResponse
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go
new file mode 100644
index 000000000..cb9e3e288
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go
@@ -0,0 +1,40 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
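BodyToErrorResponse is the piece that turns a non-2xx HTTP body into a typed error, and ToErrorResponse later recovers the structured fields from it. The same round trip can be sketched standalone with a canned XML body and a mirror of ErrorResponse; the type below is an illustration, not the library's own:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// errorResponse mirrors the ErrorResponse type defined above.
type errorResponse struct {
	XMLName   xml.Name `xml:"Error"`
	Code      string
	Message   string
	Resource  string
	RequestID string `xml:"RequestId"`
}

func (e errorResponse) Error() string { return e.Message }

func main() {
	// Canned body matching the sample error response shown earlier.
	body := `<Error><Code>AccessDenied</Code><Message>Access Denied</Message>` +
		`<Resource>/mybucket/myphoto.jpg</Resource><RequestId>F19772218238A85A</RequestId></Error>`

	var errResp errorResponse
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&errResp); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Branch on the machine-readable code, not the human-readable message.
	switch errResp.Code {
	case "AccessDenied":
		fmt.Println("denied on", errResp.Resource)
	case "NoSuchKey":
		fmt.Println("object is missing")
	default:
		fmt.Println("unexpected error:", errResp)
	}
}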
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.BucketExists("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go new file mode 100644 index 000000000..5b0cec786 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go @@ -0,0 +1,41 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + acl, err := s3Client.GetBucketACL("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println(acl) + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go new file mode 100644 index 000000000..71a6d92a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go @@ -0,0 +1,51 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + reader, stat, err := s3Client.GetObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("testfile") + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go new file mode 100644 index 000000000..b4e2c54b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go @@ -0,0 +1,51 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10) + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("testfile") + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go new file mode 100644 index 000000000..8148ba8a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go @@ -0,0 +1,41 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for bucket := range s3Client.ListBuckets() { + if bucket.Err != nil { + log.Fatalln(bucket.Err) + } + log.Println(bucket.Stat) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go new file mode 100644 index 000000000..f73833aca --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go @@ -0,0 +1,44 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + // Recursive + for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) { + if multipartObject.Err != nil { + log.Fatalln(multipartObject.Err) + } + log.Println(multipartObject) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go new file mode 100644 index 000000000..1908d7224 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go @@ -0,0 +1,41 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for object := range s3Client.ListObjects("mybucket", "", true) { + if object.Err != nil { + log.Fatalln(object.Err) + } + log.Println(object.Stat) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go new file mode 100644 index 000000000..1fcfb7151 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go @@ -0,0 +1,40 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.MakeBucket("mybucket", "") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go new file mode 100644 index 000000000..5cf057286 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go @@ -0,0 +1,52 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + object, err := os.Open("testfile") + if err != nil { + log.Fatalln(err) + } + defer object.Close() + objectInfo, err := object.Stat() + if err != nil { + object.Close() + log.Fatalln(err) + } + + err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object) + if err != nil { + log.Fatalln(err) + } + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go new file mode 100644 index 000000000..6004c90e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveBucket("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go new file mode 100644 index 000000000..4d5b49c1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go @@ -0,0 +1,41 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
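Since PutObject takes an io.Reader together with an explicit size, the file in the example above is not special; any in-memory payload works the same way. A variant sketch against the same Config and PutObject signature shown in these examples:

package main

import (
	"bytes"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	config := minio.Config{
		Endpoint: "https://play.minio.io:9000",
	}
	s3Client, err := minio.New(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Any io.Reader works, as long as the size argument matches its length.
	payload := []byte("hello from memory")
	err = s3Client.PutObject("mybucket", "myobject",
		"text/plain", int64(len(payload)), bytes.NewReader(payload))
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")
}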
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") { + if err != nil { + log.Fatalln(err) + } + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go new file mode 100644 index 000000000..4447b65ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go new file mode 100644 index 000000000..f85d1256a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go @@ -0,0 +1,40 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")) + if err != nil { + log.Fatalln(err) + } + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go new file mode 100644 index 000000000..bb3844900 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go @@ -0,0 +1,40 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + Endpoint: "https://play.minio.io:9000", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + stat, err := s3Client.StatObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + log.Println(stat) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go new file mode 100644 index 000000000..7b0b17f82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.BucketExists("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go new file mode 100644 index 000000000..c9fbe78c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + acl, err := s3Client.GetBucketACL("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println(acl) + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go new file mode 100644 index 000000000..d0082d90a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go @@ -0,0 +1,53 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + reader, stat, err := s3Client.GetObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("testfile") + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go new file mode 100644 index 000000000..591b4be3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go @@ -0,0 +1,53 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10) + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("testfile") + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go new file mode 100644 index 000000000..5aff5a1a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for bucket := range s3Client.ListBuckets() { + if bucket.Err != nil { + log.Fatalln(bucket.Err) + } + log.Println(bucket.Stat) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go new file mode 100644 index 000000000..0ceab2b28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go @@ -0,0 +1,44 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + // Recursive + for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) { + if multipartObject.Err != nil { + log.Fatalln(multipartObject.Err) + } + log.Println(multipartObject) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go new file mode 100644 index 000000000..a091fbbf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for object := range s3Client.ListObjects("mybucket", "", true) { + if object.Err != nil { + log.Fatalln(object.Err) + } + log.Println(object.Stat) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go new file mode 100644 index 000000000..5b97ca128 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.MakeBucket("mybucket", "") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go new file mode 100644 index 000000000..fc96bb002 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + string, err := s3Client.PresignedGetObject("mybucket", "myobject", time.Duration(1000)*time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println(string) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go new file mode 100644 index 000000000..c41cae461 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + policy := minio.NewPostPolicy() + policy.SetKey("myobject") + policy.SetBucket("mybucket") + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + m, err := s3Client.PresignedPostPolicy(policy) + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("curl ") + for k, v := range m { + fmt.Printf("-F %s=%s ", k, v) + } + fmt.Printf("-F file=@/etc/bashrc ") + fmt.Printf(config.Endpoint + "/mybucket\n") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go new file mode 100644 index 000000000..7675cabb8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
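The curl command printed above is one way to use the form fields returned by PresignedPostPolicy; the same browser-style upload can be done from Go with mime/multipart. A sketch, assuming the returned value is a map[string]string of form fields as the range loop above suggests; the placeholder fields here omit the signature entries a real policy carries:

package main

import (
	"bytes"
	"log"
	"mime/multipart"
	"net/http"
)

// postForm performs the upload the curl command describes: every policy field
// becomes a form field, and the file content goes last under the name "file".
func postForm(endpoint string, fields map[string]string, name string, data []byte) error {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	for k, v := range fields {
		if err := w.WriteField(k, v); err != nil {
			return err
		}
	}
	fw, err := w.CreateFormFile("file", name)
	if err != nil {
		return err
	}
	if _, err := fw.Write(data); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	resp, err := http.Post(endpoint, w.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
	return nil
}

func main() {
	// Placeholder fields; in the example above they come from
	// s3Client.PresignedPostPolicy(policy).
	fields := map[string]string{"key": "myobject", "bucket": "mybucket"}
	if err := postForm("https://s3.amazonaws.com/mybucket", fields, "myobject", []byte("data")); err != nil {
		log.Fatalln(err)
	}
}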
+ */ + +package main + +import ( + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + string, err := s3Client.PresignedPutObject("mybucket", "myobject", time.Duration(1000)*time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println(string) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go new file mode 100644 index 000000000..b67832b7f --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + object, err := os.Open("testfile") + if err != nil { + log.Fatalln(err) + } + defer object.Close() + objectInfo, err := object.Stat() + if err != nil { + object.Close() + log.Fatalln(err) + } + + err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object) + if err != nil { + log.Fatalln(err) + } + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go new file mode 100644 index 000000000..65f9e16d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveBucket("mybucket") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go new file mode 100644 index 000000000..cb78304d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go @@ -0,0 +1,43 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") { + if err != nil { + log.Fatalln(err) + } + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go new file mode 100644 index 000000000..07761ebd9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go new file mode 100644 index 000000000..dfe3af630 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")) + if err != nil { + log.Fatalln(err) + } + +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go new file mode 100644 index 000000000..400670f19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go @@ -0,0 +1,42 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + config := minio.Config{ + AccessKeyID: "YOUR-ACCESS-KEY-HERE", + SecretAccessKey: "YOUR-PASSWORD-HERE", + Endpoint: "https://s3.amazonaws.com", + } + s3Client, err := minio.New(config) + if err != nil { + log.Fatalln(err) + } + stat, err := s3Client.StatObject("mybucket", "myobject") + if err != nil { + log.Fatalln(err) + } + log.Println(stat) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go new file mode 100644 index 000000000..a1637545a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go @@ -0,0 +1,152 @@ +package minio + +import ( + "encoding/base64" + "errors" + "fmt" + "strings" + "time" +) + +// expirationDateFormat date format for expiration key in json policy +const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// Policy explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +type policy struct { + matchType string + key string + value string +} + +// PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string. +type PostPolicy struct { + expiration time.Time // expiration date and time of the POST policy. + policies []policy + contentLengthRange struct { + min int + max int + } + + // Post form data + formData map[string]string +} + +// NewPostPolicy instantiates a new post policy +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.policies = make([]policy, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires expiration time +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return errors.New("time input invalid") + } + p.expiration = t + return nil +} + +// SetKey Object name +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errors.New("key invalid") + } + policy := policy{"eq", "$key", key} + p.policies = append(p.policies, policy) + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith Object name that can start with +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { + return errors.New("key-starts-with invalid") + } + policy := policy{"starts-with", "$key", keyStartsWith} + p.policies = append(p.policies, policy) + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket bucket name +func (p *PostPolicy) SetBucket(bucket string) error { + if strings.TrimSpace(bucket) == "" || bucket == "" { + return errors.New("bucket invalid") + } + policy := policy{"eq", "$bucket", bucket} + p.policies = append(p.policies, policy) + p.formData["bucket"] = bucket + return nil +} + +// SetContentType content-type +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return errors.New("contentType invalid") + } + policy := policy{"eq", "$Content-Type", contentType} + if err := p.addNewPolicy(policy); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLength - set new min and max content length condition +func (p *PostPolicy) SetContentLength(min, max int) error { + if min > max { + return errors.New("minimum cannot be bigger than maximum") + } + if min < 0 { + return errors.New("minimum cannot be negative") + } + if max < 0 { + return
errors.New("maximum cannot be negative") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// addNewPolicy - internal helper to validate adding new policies +func (p *PostPolicy) addNewPolicy(po policy) error { + if po.matchType == "" || po.key == "" || po.value == "" { + return errors.New("policy invalid") + } + p.policies = append(p.policies, po) + return nil +} + +// Stringer interface for printing in pretty manner +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON provides Marshalled JSON +func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + var policiesStr string + policies := []string{} + for _, po := range p.policies { + policies = append(policies, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.key, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + policies = append(policies, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(policies) > 0 { + policiesStr = `"conditions":[` + strings.Join(policies, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + policiesStr + retStr = retStr + "}" + return []byte(retStr) +} + +// base64 produces base64 of PostPolicy's Marshalled json +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request.go b/Godeps/_workspace/src/github.com/minio/minio-go/request.go new file mode 100644 index 000000000..74d59b624 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request.go @@ -0,0 +1,498 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "errors" + "io" + "io/ioutil" + "net/http" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" +) + +// operation - rest operation +type operation struct { + HTTPServer string + HTTPMethod string + HTTPPath string +} + +// request - a http request +type request struct { + req *http.Request + config *Config + body io.ReadSeeker + expires string +} + +const ( + authHeader = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes problems with generating pre-signed URLs +/// (that are executed by other agents) or when customers pass requests through proxies, which may +/// modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed URL should not provide a content-length +/// constraint, specifically when vending a S3 pre-signed PUT URL. 
The corollary to this is that when +/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which +/// implicitly validates the payload length (since changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in browser environments, where browsers +/// like to modify and normalize the content-type header in different ways. There is more information +/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic +/// and reduces the possibility of future bugs +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getURLEncodedPath encodes a string from its UTF-8 byte representation to HTML hex escape sequences +// +// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8; +// non-English characters cannot be parsed due to the way url.Encode() is written. +// +// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports +// pretty much every UTF-8 character. +func getURLEncodedPath(pathName string) string { + // if the path matches the reserved string set, no need to encode it + reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + if reservedNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +func path2BucketAndObject(path string) (bucketName, objectName string) { + pathSplits := strings.SplitN(path, "?", 2) + splits := strings.SplitN(pathSplits[0], separator, 3) + switch len(splits) { + case 0, 1: + bucketName = "" + objectName = "" + case 2: + bucketName = splits[1] + objectName = "" + case 3: + bucketName = splits[1] + objectName = splits[2] + } + return bucketName, objectName +} + +// path2Object gives objectName from URL path +func path2Object(path string) (objectName string) { + _, objectName = path2BucketAndObject(path) + return +} + +// path2Bucket gives bucketName from URL path +func path2Bucket(path string) (bucketName string) { + bucketName, _ = path2BucketAndObject(path) + return +} + +// path2Query gives query part from URL path +func path2Query(path string) (query string) { + pathSplits := strings.SplitN(path, "?", 2) + if len(pathSplits) > 1 { + query = pathSplits[1] + } + return +} + +func (op *operation) getRequestURL(config Config) (url string) { + // parse URL for the combination of HTTPServer + HTTPPath + url = op.HTTPServer + separator + if !config.isVirtualStyle { + url += path2Bucket(op.HTTPPath) + } + objectName := getURLEncodedPath(path2Object(op.HTTPPath)) + queryPath := path2Query(op.HTTPPath)
+ if objectName == "" && queryPath != "" { + url += "?" + queryPath + return + } + if objectName != "" && queryPath == "" { + if strings.HasSuffix(url, separator) { + url += objectName + } else { + url += separator + objectName + } + return + } + if objectName != "" && queryPath != "" { + if strings.HasSuffix(url, separator) { + url += objectName + "?" + queryPath + } else { + url += separator + objectName + "?" + queryPath + } + } + return +} + +func newPresignedRequest(op *operation, config *Config, expires string) (*request, error) { + // if no method default to POST + method := op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // save for subsequent use + r := new(request) + r.config = config + r.expires = expires + r.req = req + r.body = nil + + return r, nil +} + +// newUnauthenticatedRequest - instantiate a new unauthenticated request +func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) { + // if no method default to POST + method := op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // add body + switch { + case body == nil: + req.Body = nil + default: + req.Body = ioutil.NopCloser(body) + } + + // save for subsequent use + r := new(request) + r.req = req + r.config = config + + return r, nil +} + +// newRequest - instantiate a new request +func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) { + // if no method default to POST + method := op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // add body + switch { + case body == nil: + req.Body = nil + default: + req.Body = ioutil.NopCloser(body) + } + + // save for subsequent use + r := new(request) + r.config = config + r.req = req + r.body = body + + return r, nil +} + +// Do - start the request +func (r *request) Do() (resp *http.Response, err error) { + if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" { + r.SignV4() + } + transport := http.DefaultTransport + if r.config.Transport != nil { + transport = r.config.Transport + } + // do not use http.Client{}, while it may seem intuitive but the problem seems to be + // that http.Client{} internally follows redirects and there is no easier way to disable + // it from outside using a configuration parameter - + // this auto redirect causes complications in verifying subsequent errors + // + // The best is 
to use RoundTrip() directly, so the request comes back to the caller where +// we are going to handle such replies. And indeed that is the right thing to do here. +// + return transport.RoundTrip(r.req) +} + +// Set - set additional headers if any +func (r *request) Set(key, value string) { + r.req.Header.Set(key, value) +} + +// Get - get header values +func (r *request) Get(key string) string { + return r.req.Header.Get(key) +} + +// getHashedPayload gets the hexadecimal value of the SHA256 hash of the request payload +func (r *request) getHashedPayload() string { + hash := func() string { + switch { + case r.expires != "": + return "UNSIGNED-PAYLOAD" + case r.body == nil: + return hex.EncodeToString(sum256([]byte{})) + default: + sum256Bytes, _ := sum256Reader(r.body) + return hex.EncodeToString(sum256Bytes) + } + } + hashedPayload := hash() + if hashedPayload != "UNSIGNED-PAYLOAD" { + r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload) + } + return hashedPayload +} + +// getCanonicalHeaders generates a list of request headers with their values +func (r *request) getCanonicalHeaders() string { + var headers []string + vals := make(map[string][]string) + for k, vv := range r.req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(r.req.URL.Host) + fallthrough + default: + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + return buf.String() +} + +// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names +func (r *request) getSignedHeaders() string { + var headers []string + for k := range r.req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + headers = append(headers, "host") + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalRequest generates a canonical request of style +// +// canonicalRequest = +// <HTTPMethod>\n +// <CanonicalURI>\n +// <CanonicalQueryString>\n +// <CanonicalHeaders>\n +// <SignedHeaders>\n +// <HashedPayload> +// +func (r *request) getCanonicalRequest(hashedPayload string) string { + r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) + canonicalRequest := strings.Join([]string{ + r.req.Method, + getURLEncodedPath(r.req.URL.Path), + r.req.URL.RawQuery, + r.getCanonicalHeaders(), + r.getSignedHeaders(), + hashedPayload, + }, "\n") + return canonicalRequest +} + +// getStringToSign generates a string to sign based on selected query values +func (r *request) getStringToSign(canonicalRequest string, t time.Time) string { + stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" + stringToSign = stringToSign + getScope(r.config.Region, t) + "\n" + stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + return stringToSign +} + +// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (r *request) PreSignV4() (string, error) { + if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" { + return "", errors.New("presign requires accesskey and secretkey") + } + r.SignV4() + return r.req.URL.String(), nil +} + +func (r
*request) PostPresignSignature(policyBase64 string, t time.Time) string { + signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (r *request) SignV4() { + query := r.req.URL.Query() + if r.expires != "" { + query.Set("X-Amz-Algorithm", authHeader) + } + t := time.Now().UTC() + // Add date if not present + if r.expires != "" { + query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + query.Set("X-Amz-Expires", r.expires) + } else { + r.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + } + + hashedPayload := r.getHashedPayload() + signedHeaders := r.getSignedHeaders() + if r.expires != "" { + query.Set("X-Amz-SignedHeaders", signedHeaders) + } + credential := getCredential(r.config.AccessKeyID, r.config.Region, t) + if r.expires != "" { + query.Set("X-Amz-Credential", credential) + r.req.URL.RawQuery = query.Encode() + } + canonicalRequest := r.getCanonicalRequest(hashedPayload) + stringToSign := r.getStringToSign(canonicalRequest, t) + signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) + signature := getSignature(signingKey, stringToSign) + + if r.expires != "" { + r.req.URL.RawQuery += "&X-Amz-Signature=" + signature + } else { + // final Authorization header + parts := []string{ + authHeader + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + r.Set("Authorization", auth) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt.go deleted file mode 100644 index c0654f5d8..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "time" -) - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other goamz packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt_test.go deleted file mode 100644 index 761424289..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/attempt_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package aws_test - -import ( - "time" - - . "gopkg.in/check.v1" - - "gopkg.in/amz.v3/aws" -) - -func (S) TestAttemptTiming(c *C) { - testAttempt := aws.AttemptStrategy{ - Total: 0.25e9, - Delay: 0.1e9, - } - want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} - got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing - t0 := time.Now() - for a := testAttempt.Start(); a.Next(); { - got = append(got, time.Now().Sub(t0)) - } - got = append(got, time.Now().Sub(t0)) - c.Assert(got, HasLen, len(want)) - const margin = 0.01e9 - for i, got := range want { - lo := want[i] - margin - hi := want[i] + margin - if got < lo || got > hi { - c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) - } - } -} - -func (S) TestAttemptNextHasNext(c *C) { - a := aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, false) - c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{Total: 2e8}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, true) - time.Sleep(2e8) - c.Assert(a.HasNext(), Equals, true) - c.Assert(a.Next(), Equals, true) - c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start() - time.Sleep(1e8) - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, true) - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, false) - c.Assert(a.Next(), Equals, false) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws.go deleted file mode 100644 index f2ad7f57c..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws.go +++ /dev/null @@ -1,268 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// - -package aws - -import ( - "errors" - "os" - "strings" -) - -// Region defines the URLs where AWS services may be accessed. -// -// See http://goo.gl/d8BP1 for more details. -type Region struct { - Name string // the canonical name of this region. - EC2Endpoint string - S3Endpoint string - S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. - S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. - S3LowercaseBucket bool // true if the region requires bucket names to be lower case. - SDBEndpoint string // not all regions have simpleDB, fro eg. Frankfurt (eu-central-1) does not - SNSEndpoint string - SQSEndpoint string - IAMEndpoint string -} - -func (r Region) ResolveS3BucketEndpoint(bucketName string) string { - if r.S3BucketEndpoint != "" { - return strings.ToLower(strings.Replace(r.S3BucketEndpoint, "${bucket}", bucketName, -1)) - } - return strings.ToLower(r.S3Endpoint + "/" + bucketName + "/") -} - -var USEast = Region{ - "us-east-1", // US East (N. 
Virginia) - "https://ec2.us-east-1.amazonaws.com", - "https://s3.amazonaws.com", - "", - false, - false, - "https://sdb.amazonaws.com", - "https://sns.us-east-1.amazonaws.com", - "https://sqs.us-east-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var USWest = Region{ - "us-west-1", //US West (N. California) - "https://ec2.us-west-1.amazonaws.com", - "https://s3-us-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-1.amazonaws.com", - "https://sns.us-west-1.amazonaws.com", - "https://sqs.us-west-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var USWest2 = Region{ - "us-west-2", // US West (Oregon) - "https://ec2.us-west-2.amazonaws.com", - "https://s3-us-west-2.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-2.amazonaws.com", - "https://sns.us-west-2.amazonaws.com", - "https://sqs.us-west-2.amazonaws.com", - "https://iam.amazonaws.com", -} - -var USGovWest = Region{ - "us-gov-west-1", // Isolated regions, AWS GovCloud (US) - "https://ec2.us-gov-west-1.amazonaws.com", - "https://s3-us-gov-west-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.us-gov-west-1.amazonaws.com", - "https://sqs.us-gov-west-1.amazonaws.com", - "https://iam.us-gov.amazonaws.com", -} - -var EUWest = Region{ - "eu-west-1", // EU (Ireland) - "https://ec2.eu-west-1.amazonaws.com", - "https://s3-eu-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-west-1.amazonaws.com", - "https://sns.eu-west-1.amazonaws.com", - "https://sqs.eu-west-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var EUCentral = Region{ - "eu-central-1", // EU (Frankfurt) - "https://ec2.eu-central-1.amazonaws.com", - "https://s3-eu-central-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.eu-central-1.amazonaws.com", - "https://sqs.eu-central-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var APSoutheast = Region{ - "ap-southeast-1", // Asia Pacific (Singapore) - "https://ec2.ap-southeast-1.amazonaws.com", - "https://s3-ap-southeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-1.amazonaws.com", - "https://sns.ap-southeast-1.amazonaws.com", - "https://sqs.ap-southeast-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var APSoutheast2 = Region{ - "ap-southeast-2", //Asia Pacific (Sydney) - "https://ec2.ap-southeast-2.amazonaws.com", - "https://s3-ap-southeast-2.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-2.amazonaws.com", - "https://sns.ap-southeast-2.amazonaws.com", - "https://sqs.ap-southeast-2.amazonaws.com", - "https://iam.amazonaws.com", -} - -var APNortheast = Region{ - "ap-northeast-1", //Asia Pacific (Tokyo) - "https://ec2.ap-northeast-1.amazonaws.com", - "https://s3-ap-northeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-northeast-1.amazonaws.com", - "https://sns.ap-northeast-1.amazonaws.com", - "https://sqs.ap-northeast-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var SAEast = Region{ - "sa-east-1", // South America (Sao Paulo) - "https://ec2.sa-east-1.amazonaws.com", - "https://s3-sa-east-1.amazonaws.com", - "", - true, - true, - "https://sdb.sa-east-1.amazonaws.com", - "https://sns.sa-east-1.amazonaws.com", - "https://sqs.sa-east-1.amazonaws.com", - "https://iam.amazonaws.com", -} - -var CNNorth = Region{ - "cn-north-1", // Isolated regions, China (Beijing) - "https://ec2.cn-north-1.amazonaws.com.cn", - "https://s3.cn-north-1.amazonaws.com.cn", - "", - true, - true, - "https://sdb.cn-north-1.amazonaws.com.cn", - "https://sns.cn-north-1.amazonaws.com.cn", - 
"https://sqs.cn-north-1.amazonaws.com.cn", - "https://iam.cn-north-1.amazonaws.com.cn", -} - -var Regions = map[string]Region{ - APNortheast.Name: APNortheast, - APSoutheast.Name: APSoutheast, - APSoutheast2.Name: APSoutheast2, - EUWest.Name: EUWest, - EUCentral.Name: EUCentral, - USEast.Name: USEast, - USWest.Name: USWest, - USWest2.Name: USWest2, - USGovWest.Name: USGovWest, - SAEast.Name: SAEast, - CNNorth.Name: CNNorth, -} - -type Auth struct { - AccessKey, SecretKey string -} - -var unreserved = make([]bool, 128) -var hex = "0123456789ABCDEF" - -func init() { - // RFC3986 - u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" - for _, c := range u { - unreserved[c] = true - } -} - -// EnvAuth creates an Auth based on environment information. -// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment -// variables are used as the first preference, but EC2_ACCESS_KEY -// and EC2_SECRET_KEY or AWS_ACCESS_KEY and AWS_SECRET_KEY -// environment variables are also supported. -func EnvAuth() (auth Auth, err error) { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - // first fallbaback to EC2_ env variable - if auth.AccessKey == "" && auth.SecretKey == "" { - auth.AccessKey = os.Getenv("EC2_ACCESS_KEY") - auth.SecretKey = os.Getenv("EC2_SECRET_KEY") - } - // second fallbaback to AWS_ env variable - if auth.AccessKey == "" && auth.SecretKey == "" { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") - auth.SecretKey = os.Getenv("AWS_SECRET_KEY") - } - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID not found in environment") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY not found in environment") - } - return -} - -// Encode takes a string and URI-encodes it in a way suitable -// to be used in AWS signatures. -func Encode(s string) string { - encode := false - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - encode = true - break - } - } - if !encode { - return s - } - e := make([]byte, len(s)*3) - ei := 0 - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - e[ei] = '%' - e[ei+1] = hex[c>>4] - e[ei+2] = hex[c&0xF] - ei += 3 - } else { - e[ei] = c - ei += 1 - } - } - return string(e[:ei]) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws_test.go deleted file mode 100644 index 32fe8e56b..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package aws_test - -import ( - "os" - "strings" - "testing" - - . 
"gopkg.in/check.v1" - - "gopkg.in/amz.v3/aws" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - environ []string -} - -func (s *S) SetUpSuite(c *C) { - s.environ = os.Environ() -} - -func (s *S) TearDownTest(c *C) { - os.Clearenv() - for _, kv := range s.environ { - l := strings.SplitN(kv, "=", 2) - os.Setenv(l[0], l[1]) - } -} - -func (s *S) TestEnvAuthNoSecret(c *C) { - os.Clearenv() - _, err := aws.EnvAuth() - c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in environment") -} - -func (s *S) TestEnvAuthNoAccess(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "foo") - _, err := aws.EnvAuth() - c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID not found in environment") -} - -func (s *S) TestEnvAuth(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEnvAuthLegacy(c *C) { - os.Clearenv() - os.Setenv("EC2_SECRET_KEY", "secret") - os.Setenv("EC2_ACCESS_KEY", "access") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEnvAuthAws(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY", "access") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEncode(c *C) { - c.Assert(aws.Encode("foo"), Equals, "foo") - c.Assert(aws.Encode("/"), Equals, "%2F") -} - -func (s *S) TestRegionsAreNamed(c *C) { - for n, r := range aws.Regions { - c.Assert(n, Equals, r.Name) - } -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign.go deleted file mode 100644 index 06c96e63b..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign.go +++ /dev/null @@ -1,447 +0,0 @@ -package aws - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -var debug = log.New( - // Remove the c-style comment header to front of line to debug information. - /*os.Stdout, //*/ ioutil.Discard, - "DEBUG: ", - log.LstdFlags, -) - -type Signer func(*http.Request, Auth) error - -// Ensure our signers meet the interface -var _ Signer = SignV2 -var _ Signer = SignV4Factory("", "") - -type hasher func(io.Reader) (string, error) - -const ( - ISO8601BasicFormat = "20060102T150405Z" - ISO8601BasicFormatShort = "20060102" -) - -// SignV2 signs an HTTP request utilizing version 2 of the AWS -// signature, and the given credentials. 
-func SignV2(req *http.Request, auth Auth) (err error) { - - queryVals := req.URL.Query() - queryVals.Set("AWSAccessKeyId", auth.AccessKey) - queryVals.Set("SignatureVersion", "2") - queryVals.Set("SignatureMethod", "HmacSHA256") - - uriStr := canonicalURI(req.URL) - queryStr := canonicalQueryString(queryVals) - - payload := new(bytes.Buffer) - if err := errorCollector( - fprintfWrapper(payload, "%s\n", requestMethodVerb(req.Method)), - fprintfWrapper(payload, "%s\n", req.Host), - fprintfWrapper(payload, "%s\n", uriStr), - fprintfWrapper(payload, "%s", queryStr), - ); err != nil { - return err - } - - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write(payload.Bytes()) - signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size())) - base64.StdEncoding.Encode(signature, hash.Sum(nil)) - - queryVals.Set("Signature", string(signature)) - req.URL.RawQuery = queryVals.Encode() - - return nil -} - -// SignV4Factory returns a version 4 Signer which will utilize the -// given region name. -func SignV4Factory(regionName, serviceName string) Signer { - return func(req *http.Request, auth Auth) error { - return SignV4(req, auth, regionName, serviceName) - } -} - -func SignV4URL(req *http.Request, auth Auth, regionName, svcName string, expires time.Duration) error { - reqTime, err := requestTime(req) - if err != nil { - return err - } - - req.Header.Del("date") - - credScope := credentialScope(reqTime, regionName, svcName) - - queryVals := req.URL.Query() - queryVals.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256") - queryVals.Set("X-Amz-Credential", auth.AccessKey+"/"+credScope) - queryVals.Set("X-Amz-Date", reqTime.Format(ISO8601BasicFormat)) - queryVals.Set("X-Amz-Expires", fmt.Sprintf("%d", int(expires.Seconds()))) - queryVals.Set("X-Amz-SignedHeaders", "host") - req.URL.RawQuery = queryVals.Encode() - - _, canonReqHash, _, err := canonicalRequest(req, sha256Hasher, false) - if err != nil { - return err - } - - var strToSign string - if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil { - return err - } - - key := signingKey(reqTime, auth.SecretKey, regionName, svcName) - signature := fmt.Sprintf("%x", hmacHasher(key, strToSign)) - - debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign) - - queryVals.Set("X-Amz-Signature", signature) - - req.URL.RawQuery = queryVals.Encode() - - return nil -} - -// SignV4 signs an HTTP request utilizing version 4 of the AWS -// signature, and the given credentials. -func SignV4(req *http.Request, auth Auth, regionName, svcName string) (err error) { - - var reqTime time.Time - if reqTime, err = requestTime(req); err != nil { - return err - } - - // Remove any existing authorization headers as they will corrupt - // the signing. 
- delete(req.Header, "Authorization") - delete(req.Header, "authorization") - - credScope := credentialScope(reqTime, regionName, svcName) - - _, canonReqHash, sortedHdrNames, err := canonicalRequest(req, sha256Hasher, true) - if err != nil { - return err - } - - var strToSign string - if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil { - return err - } - - key := signingKey(reqTime, auth.SecretKey, regionName, svcName) - signature := fmt.Sprintf("%x", hmacHasher(key, strToSign)) - - debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign) - - var authHdrVal string - if authHdrVal, err = authHeaderString( - req.Header, - auth.AccessKey, - signature, - credScope, - sortedHdrNames, - ); err != nil { - return err - } - - req.Header.Set("Authorization", authHdrVal) - - return nil -} - -// Task 1: Create a Canonical Request. -// Returns the canonical request, and its hash. -func canonicalRequest( - req *http.Request, - hasher hasher, - calcPayHash bool, -) (canReq, canReqHash string, sortedHdrNames []string, err error) { - - payHash := "UNSIGNED-PAYLOAD" - if calcPayHash { - if payHash, err = payloadHash(req, hasher); err != nil { - return - } - req.Header.Set("x-amz-content-sha256", payHash) - } - - sortedHdrNames = sortHeaderNames(req.Header, "host") - var canHdr string - if canHdr, err = canonicalHeaders(sortedHdrNames, req.Host, req.Header); err != nil { - return - } - - debug.Printf("canHdr:\n\"\"\"\n%s\n\"\"\"", canHdr) - debug.Printf("signedHeader: %s\n\n", strings.Join(sortedHdrNames, ";")) - - uriStr := canonicalURI(req.URL) - queryStr := canonicalQueryString(req.URL.Query()) - - c := new(bytes.Buffer) - if err := errorCollector( - fprintfWrapper(c, "%s\n", requestMethodVerb(req.Method)), - fprintfWrapper(c, "%s\n", uriStr), - fprintfWrapper(c, "%s\n", queryStr), - fprintfWrapper(c, "%s\n", canHdr), - fprintfWrapper(c, "%s\n", strings.Join(sortedHdrNames, ";")), - fprintfWrapper(c, "%s", payHash), - ); err != nil { - return "", "", nil, err - } - - canReq = c.String() - debug.Printf("canReq:\n\"\"\"\n%s\n\"\"\"", canReq) - canReqHash, err = hasher(bytes.NewBuffer([]byte(canReq))) - - return canReq, canReqHash, sortedHdrNames, err -} - -// Task 2: Create a string to Sign -// Returns a string in the defined format to sign for the authorization header. -func stringToSign( - t time.Time, - canonReqHash string, - credScope string, -) (string, error) { - w := new(bytes.Buffer) - if err := errorCollector( - fprintfWrapper(w, "AWS4-HMAC-SHA256\n"), - fprintfWrapper(w, "%s\n", t.Format(ISO8601BasicFormat)), - fprintfWrapper(w, "%s\n", credScope), - fprintfWrapper(w, "%s", canonReqHash), - ); err != nil { - return "", err - } - - return w.String(), nil -} - -// Task 3: Calculate the Signature -// Returns a derived signing key. -func signingKey(t time.Time, secretKey, regionName, svcName string) []byte { - - kSecret := secretKey - kDate := hmacHasher([]byte("AWS4"+kSecret), t.Format(ISO8601BasicFormatShort)) - kRegion := hmacHasher(kDate, regionName) - kService := hmacHasher(kRegion, svcName) - kSigning := hmacHasher(kService, "aws4_request") - - return kSigning -} - -// Task 4: Add the Signing Information to the Request -// Returns a string to be placed in the Authorization header for the request. 
-func authHeaderString( - header http.Header, - accessKey, - signature string, - credScope string, - sortedHeaderNames []string, -) (string, error) { - w := new(bytes.Buffer) - if err := errorCollector( - fprintfWrapper(w, "AWS4-HMAC-SHA256 "), - fprintfWrapper(w, "Credential=%s/%s, ", accessKey, credScope), - fprintfWrapper(w, "SignedHeaders=%s, ", strings.Join(sortedHeaderNames, ";")), - fprintfWrapper(w, "Signature=%s", signature), - ); err != nil { - return "", err - } - - return w.String(), nil -} - -func canonicalURI(u *url.URL) string { - - // The algorithm states that if the path is empty, to just use a "/". - if u.Path == "" { - return "/" - } - - // Each path segment must be URI-encoded. - segments := strings.Split(u.Path, "/") - for i, segment := range segments { - segments[i] = goToAwsUrlEncoding(url.QueryEscape(segment)) - } - - return strings.Join(segments, "/") -} - -func canonicalQueryString(queryVals url.Values) string { - - // AWS dictates that if duplicate keys exist, their values be - // sorted as well. - for _, values := range queryVals { - sort.Strings(values) - } - - return goToAwsUrlEncoding(queryVals.Encode()) -} - -func goToAwsUrlEncoding(urlEncoded string) string { - // AWS dictates that we use %20 for encoding spaces rather than +. - // All significant +s should already be encoded into their - // hexadecimal equivalents before doing the string replace. - return strings.Replace(urlEncoded, "+", "%20", -1) -} - -func canonicalHeaders(sortedHeaderNames []string, host string, hdr http.Header) (string, error) { - buffer := new(bytes.Buffer) - - for _, hName := range sortedHeaderNames { - - hdrVals := host - if hName != "host" { - canonHdrKey := http.CanonicalHeaderKey(hName) - sortedHdrVals := hdr[canonHdrKey] - sort.Strings(sortedHdrVals) - hdrVals = strings.Join(sortedHdrVals, ",") - } - - if _, err := fmt.Fprintf(buffer, "%s:%s\n", hName, hdrVals); err != nil { - return "", err - } - } - - // There is intentionally a hanging newline at the end of the - // header list. - return buffer.String(), nil -} - -// Returns a SHA256 checksum of the request body. Represented as a -// lowercase hexadecimal string. -func payloadHash(req *http.Request, hasher hasher) (string, error) { - if req.Body == nil { - return hasher(bytes.NewBuffer(nil)) - } - - return hasher(req.Body) -} - -// Retrieve the header names, lower-case them, and sort them. -func sortHeaderNames(header http.Header, injectedNames ...string) []string { - - sortedNames := injectedNames - for hName, _ := range header { - sortedNames = append(sortedNames, strings.ToLower(hName)) - } - - sort.Strings(sortedNames) - - return sortedNames -} - -func hmacHasher(key []byte, value string) []byte { - h := hmac.New(sha256.New, key) - h.Write([]byte(value)) - return h.Sum(nil) -} - -func sha256Hasher(payloadReader io.Reader) (string, error) { - hasher := sha256.New() - _, err := io.Copy(hasher, payloadReader) - - return fmt.Sprintf("%x", hasher.Sum(nil)), err -} - -func credentialScope(t time.Time, regionName, svcName string) string { - return fmt.Sprintf( - "%s/%s/%s/aws4_request", - t.Format(ISO8601BasicFormatShort), - regionName, - svcName, - ) -} - -// We do a lot of fmt.Fprintfs in this package. Create a higher-order -// function to elide the bytes written return value so we can submit -// these calls to an error collector. -func fprintfWrapper(w io.Writer, format string, vals ...interface{}) func() error { - return func() error { - _, err := fmt.Fprintf(w, format, vals...) 
- return err - } -} - -// Poor man's maybe monad. -func errorCollector(writers ...func() error) error { - for _, writer := range writers { - if err := writer(); err != nil { - return err - } - } - - return nil -} - -// Retrieve the request time from the request. We will attempt to -// parse whatever we find, but we will not make up a request date for -// the user (i.e.: Magic!). -func requestTime(req *http.Request) (time.Time, error) { - - // Time formats to try. We want to do everything we can to accept - // all time formats, but ultimately we may fail. In the package - // scope so it doesn't get initialized for every request. - var timeFormats = []string{ - time.RFC822, - ISO8601BasicFormat, - time.RFC1123, - time.ANSIC, - time.UnixDate, - time.RubyDate, - time.RFC822Z, - time.RFC850, - time.RFC1123Z, - time.RFC3339, - time.RFC3339Nano, - time.Kitchen, - } - - // Get a date header. - var date string - if date = req.Header.Get("x-amz-date"); date == "" { - if date = req.Header.Get("date"); date == "" { - return time.Time{}, fmt.Errorf(`Could not retrieve a request date. Please provide one in either "x-amz-date", or "date".`) - } - } - - // Start attempting to parse - for _, format := range timeFormats { - if parsedTime, err := time.Parse(format, date); err == nil { - return parsedTime, nil - } - } - - return time.Time{}, fmt.Errorf( - "Could not parse the given date. Please utilize one of the following formats: %s", - strings.Join(timeFormats, ","), - ) -} - -// http.Request's Method member returns the entire method. Derive the -// verb. -func requestMethodVerb(rawMethod string) (verb string) { - verbPlus := strings.SplitN(rawMethod, " ", 2) - switch { - case len(verbPlus) == 0: // Per docs, Method will be empty if it's GET. - verb = "GET" - default: - verb = verbPlus[0] - } - return verb -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign_test.go deleted file mode 100644 index 458df7e42..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "net/http" - "time" - - . "gopkg.in/check.v1" -) - -var _ = Suite(&SigningSuite{}) - -type SigningSuite struct{} - -// TODO(katco-): The signing methodology is a "one size fits all" -// approach. The hashes we check against don't include headers that -// are added in as requisite parts for S3. That doesn't mean the tests -// are invalid, or that signing is broken for these examples, but as -// long as we're adding heads in, it's impossible to know what the new -// signature should be. We should revaluate these later. 
See: -// https://github.com/go-amz/amz/issues/29 -const v4skipReason = `Extra headers present - cannot predict generated signature (issue #29).` - -// EC2 ReST authentication docs: http://goo.gl/fQmAN -var testAuth = Auth{"user", "secret"} - -func (s *SigningSuite) TestV4SignedUrl(c *C) { - - auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"} - req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil) - req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT") - c.Assert(err, IsNil) - err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second) - c.Assert(err, IsNil) - - c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404&X-Amz-SignedHeaders=host") -} - -func (s *SigningSuite) TestV4SignedUrlReserved(c *C) { - - auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"} - req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some:reserved,characters", nil) - req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT") - c.Assert(err, IsNil) - err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second) - c.Assert(err, IsNil) - - c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/some:reserved,characters?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=ac81e03593d6fc22ac045b9353b0242da755be2af80b981eb13917d8b9cf20a4&X-Amz-SignedHeaders=host") -} - -func (s *SigningSuite) TestV4StringToSign(c *C) { - - mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z") - c.Assert(err, IsNil) - stringToSign, err := stringToSign( - mockTime, - "3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2", - "20110909/us-east-1/iam/aws4_request", - ) - c.Assert(err, IsNil) - - const expected = `AWS4-HMAC-SHA256 -20110909T233600Z -20110909/us-east-1/iam/aws4_request -3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2` - c.Assert(stringToSign, Equals, expected) -} - -func (s *SigningSuite) TestV4CanonicalRequest(c *C) { - - c.Skip(v4skipReason) - - body := new(bytes.Buffer) - _, err := fmt.Fprint(body, "Action=ListUsers&Version=2010-05-08") - c.Assert(err, IsNil) - - req, err := http.NewRequest("POST", "https://iam.amazonaws.com", body) - c.Assert(err, IsNil) - - req.Header.Add("content-type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Add("host", req.URL.Host) - req.Header.Add("x-amz-date", "20110909T233600Z") - - canonReq, canonReqHash, _, err := canonicalRequest( - req, - sha256Hasher, - true, - ) - c.Assert(err, IsNil) - - const expected = `POST -/ - -content-type:application/x-www-form-urlencoded; charset=utf-8 -host:iam.amazonaws.com -x-amz-date:20110909T233600Z - -content-type;host;x-amz-date -b6359072c78d70ebee1e81adcbab4f01bf2c23245fa365ef83fe8f1f955085e2` - - c.Assert(canonReq, Equals, expected) - c.Assert(canonReqHash, Equals, "3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2") -} - -func (s *SigningSuite) TestV4SigningKey(c *C) { - - c.Skip(v4skipReason) - - mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z") - c.Assert(err, IsNil) - c.Assert( - fmt.Sprintf("%v", signingKey(mockTime, 
testAuth.SecretKey, USEast.Name, "iam")), - Equals, - "[152 241 216 137 254 196 244 66 26 220 82 43 171 12 225 248 46 105 41 194 98 237 21 229 169 76 144 239 209 227 176 231]") -} - -func (s *SigningSuite) TestV4BasicSignatureV4(c *C) { - - c.Skip(v4skipReason) - - body := new(bytes.Buffer) - - req, err := http.NewRequest("POST / http/1.1", "https://host.foo.com", body) - c.Assert(err, IsNil) - - req.Header.Add("Host", req.URL.Host) - req.Header.Add("Date", "Mon, 09 Sep 2011 23:36:00 GMT") - - testAuth := Auth{ - AccessKey: "AKIDEXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", - } - err = SignV4(req, testAuth, USEast.Name, "host") - c.Assert(err, IsNil) - - c.Assert(req.Header.Get("Authorization"), Equals, `AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request,SignedHeaders=date;host,Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726`) -} - -func (s *SigningSuite) TestV4MoreCompleteSignature(c *C) { - - req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil) - c.Assert(err, IsNil) - - req.Header.Set("x-amz-date", "20130524T000000Z") - req.Header.Set("Range", "bytes=0-9") - - testAuth := Auth{ - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - } - err = SignV4(req, testAuth, USEast.Name, "s3") - c.Assert(err, IsNil) - c.Check(req.Header.Get("Authorization"), Equals, "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41") -} - -// -// v2 Tests -// - -func (s *SigningSuite) TestV2BasicSignature(c *C) { - req, err := http.NewRequest("GET", "http://localhost/path", nil) - c.Assert(err, IsNil) - - SignV2(req, testAuth) - - query := req.URL.Query() - - c.Assert(query.Get("SignatureVersion"), Equals, "2") - c.Assert(query.Get("SignatureMethod"), Equals, "HmacSHA256") - expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4=" - c.Assert(query.Get("Signature"), Equals, expected) -} - -func (s *SigningSuite) TestV2ParamSignature(c *C) { - - req, err := http.NewRequest("GET", "http://localhost/path", nil) - c.Assert(err, IsNil) - - query := req.URL.Query() - for i := 1; i <= 3; i++ { - query.Add(fmt.Sprintf("param%d", i), fmt.Sprintf("value%d", i)) - } - req.URL.RawQuery = query.Encode() - - SignV2(req, testAuth) - - expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU=" - c.Assert(req.URL.Query().Get("Signature"), Equals, expected) -} - -func (s *SigningSuite) TestV2ManyParams(c *C) { - - req, err := http.NewRequest("GET", "http://localhost/path", nil) - c.Assert(err, IsNil) - - query := req.URL.Query() - orderedVals := []int{10, 2, 3, 4, 5, 6, 7, 8, 9, 1} - for i, val := range orderedVals { - query.Add(fmt.Sprintf("param%d", i+1), fmt.Sprintf("value%d", val)) - } - req.URL.RawQuery = query.Encode() - - SignV2(req, testAuth) - - expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg=" - c.Assert(req.URL.Query().Get("Signature"), Equals, expected) -} - -func (s *SigningSuite) TestV2Escaping(c *C) { - - req, err := http.NewRequest("GET", "http://localhost/path", nil) - c.Assert(err, IsNil) - - query := req.URL.Query() - query.Add("Nonce", "+ +") - req.URL.RawQuery = query.Encode() - - err = SignV2(req, testAuth) - c.Assert(err, IsNil) - - query = req.URL.Query() - c.Assert(query.Get("Nonce"), Equals, "+ +") - - expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA=" - 
c.Assert(query.Get("Signature"), Equals, expected) -} - -func (s *SigningSuite) TestV2SignatureExample1(c *C) { - - req, err := http.NewRequest("GET", "http://sdb.amazonaws.com/", nil) - c.Assert(err, IsNil) - - query := req.URL.Query() - query.Add("Timestamp", "2009-02-01T12:53:20+00:00") - query.Add("Version", "2007-11-07") - query.Add("Action", "ListDomains") - req.URL.RawQuery = query.Encode() - - SignV2(req, Auth{"access", "secret"}) - - expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ=" - c.Assert(req.URL.Query().Get("Signature"), Equals, expected) -} - -// Tests example from: -// http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html -// Specifically, good for testing case when URL does not contain a / -func (s *SigningSuite) TestV2SignatureTutorialExample(c *C) { - - req, err := http.NewRequest("GET", "https://elasticmapreduce.amazonaws.com/", nil) - c.Assert(err, IsNil) - - query := req.URL.Query() - query.Add("Timestamp", "2011-10-03T15:19:30") - query.Add("Version", "2009-03-31") - query.Add("Action", "DescribeJobFlows") - req.URL.RawQuery = query.Encode() - - testAuth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"} - err = SignV2(req, testAuth) - c.Assert(err, IsNil) - c.Assert(req.URL.Query().Get("Signature"), Equals, "i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=") -} - -// https://bugs.launchpad.net/goamz/+bug/1022749 -func (s *SigningSuite) TestSignatureWithEndpointPath(c *C) { - - req, err := http.NewRequest("GET", "http://localhost:4444/services/Cloud", nil) - c.Assert(err, IsNil) - - queryStr := req.URL.Query() - queryStr.Add("Action", "RebootInstances") - queryStr.Add("Version", "2011-12-15") - queryStr.Add("InstanceId.1", "i-10a64379") - queryStr.Add("Timestamp", time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).In(time.UTC).Format(time.RFC3339)) - req.URL.RawQuery = queryStr.Encode() - - err = SignV2(req, Auth{"abc", "123"}) - c.Assert(err, IsNil) - c.Assert(req.URL.Query().Get("Signature"), Equals, "gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M=") - err = req.ParseForm() - c.Assert(err, IsNil) - c.Assert(req.Form["Signature"], DeepEquals, []string{"gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M="}) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/export_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/export_test.go deleted file mode 100644 index 9c2c11f37..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/export_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package s3 - -import ( - "net/http" - - "gopkg.in/amz.v3/aws" -) - -var originalStrategy = attempts - -func BuildError(resp *http.Response) error { - return buildError(resp) -} - -func SetAttemptStrategy(s *aws.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func AttemptStrategy() aws.AttemptStrategy { - return attempts -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi.go deleted file mode 100644 index 82036399b..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi.go +++ /dev/null @@ -1,502 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "net/http" - "sort" - "strconv" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. 
-// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. -// -// See http://goo.gl/vJfTG for an overview of multipart uploads. -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// See http://goo.gl/ePioY for details. -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - - req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil) - if err != nil { - return nil, nil, err - } - - query := req.URL.Query() - query.Add("uploads", "") - query.Add("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) - query.Add("prefix", prefix) - query.Add("delimiter", delim) - req.URL.RawQuery = query.Encode() - - addAmazonDateHeader(req.Header) - - // We need to resign every iteration because we're changing variables. - if err := b.S3.Sign(req, b.Auth); err != nil { - return nil, nil, err - } - - for attempt := attempts.Start(); attempt.Next(); { - - resp, err := requestRetryLoop(req, attempts) - if err == nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - err = buildError(resp) - } - } - if err != nil { - if shouldRetry(err) && attempt.HasNext() { - continue - } - return nil, nil, err - } - - var multiResp listMultiResp - if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil { - return nil, nil, err - } - resp.Body.Close() - - for i := range multiResp.Upload { - multi := &multiResp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, multiResp.CommonPrefixes...) - if !multiResp.IsTruncated { - return multis, prefixes, nil - } - - query := req.URL.Query() - query.Set("key-marker", multiResp.NextKeyMarker) - query.Set("upload-id-marker", multiResp.NextUploadIdMarker) - req.URL.RawQuery = query.Encode() - - // Last request worked; restart our counter. - attempt = attempts.Start() - } - - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// See http://goo.gl/XP8kL for details. 
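// A hedged usage sketch of the multipart API defined in this file, assuming
// an already-constructed *Bucket b and in-memory chunks chunk1/chunk2 (the
// key and content type are illustrative; part numbers start at 1, and every
// part except the last must be at least 5MB):
//
//     multi, err := b.InitMulti("backups/archive.tar", "application/x-tar", Private)
//     if err != nil { /* handle */ }
//     p1, err := multi.PutPart(1, bytes.NewReader(chunk1))
//     if err != nil { /* handle */ }
//     p2, err := multi.PutPart(2, bytes.NewReader(chunk2))
//     if err != nil { /* handle */ }
//     if err := multi.Complete([]Part{p1, p2}); err != nil {
//         _ = multi.Abort() // free the stored parts if assembly fails
//     }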
-func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) { - - req, err := http.NewRequest("POST", b.Region.ResolveS3BucketEndpoint(b.Name), nil) - if err != nil { - return nil, err - } - req.URL.Path += key - - query := req.URL.Query() - query.Add("uploads", "") - req.URL.RawQuery = query.Encode() - - req.Header.Add("Content-Type", contType) - req.Header.Add("Content-Length", "0") - req.Header.Add("x-amz-acl", string(perm)) - addAmazonDateHeader(req.Header) - - if err := b.S3.Sign(req, b.Auth); err != nil { - return nil, err - } - - resp, err := requestRetryLoop(req, attempts) - if err == nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - err = buildError(resp) - } - } - if err != nil { - return nil, err - } - - var multiResp struct { - UploadId string `xml:"UploadId"` - } - if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil { - return nil, err - } - - return &Multi{Bucket: b, Key: key, UploadId: multiResp.UploadId}, nil -} - -// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. -// -// See http://goo.gl/pqZer for details. -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - - req, err := http.NewRequest("PUT", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), r) - if err != nil { - return Part{}, err - } - req.Close = true - req.URL.Path += m.Key - req.ContentLength = partSize - - query := req.URL.Query() - query.Add("uploadId", m.UploadId) - query.Add("partNumber", strconv.FormatInt(int64(n), 10)) - req.URL.RawQuery = query.Encode() - - req.Header.Add("Content-MD5", md5b64) - addAmazonDateHeader(req.Header) - - if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil { - return Part{}, err - } - // Signing may read the request body. - if _, err := r.Seek(0, 0); err != nil { - return Part{}, err - } - - resp, err := requestRetryLoop(req, attempts) - defer resp.Body.Close() - - if err != nil { - return Part{}, err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return Part{}, buildError(resp) - } - - part := Part{n, resp.Header.Get("ETag"), partSize} - if part.ETag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - - return part, nil -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. 
-var listPartsMax = 1000 - -// ListParts returns the list of previously uploaded parts in m, -// ordered by part number. -// -// See http://goo.gl/ePioY for details. -func (m *Multi) ListParts() ([]Part, error) { - - req, err := http.NewRequest("GET", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil) - if err != nil { - return nil, err - } - req.Close = true - req.URL.Path += m.Key - - query := req.URL.Query() - query.Add("uploadId", m.UploadId) - query.Add("max-parts", strconv.FormatInt(int64(listPartsMax), 10)) - req.URL.RawQuery = query.Encode() - - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - - addAmazonDateHeader(req.Header) - - // We need to resign every iteration because we're changing the URL. - if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil { - return nil, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - err = buildError(resp) - } - - if err != nil { - if shouldRetry(err) && attempt.HasNext() { - continue - } - return nil, err - } - - var listResp listPartsResp - if err := xml.NewDecoder(resp.Body).Decode(&listResp); err != nil { - return nil, err - } - - parts = append(parts, listResp.Part...) - if listResp.IsTruncated == false { - break - } - - query.Set("part-number-marker", listResp.NextPartNumberMarker) - req.URL.RawQuery = query.Encode() - - // Last request worked; restart our counter. - attempt = attempts.Start() - } - - sort.Sort(parts) - return parts, nil -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. 
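// (Recap of the reuse check above: an old part is reused only when its part
// number, its size, and its ETag all match the current section, where the
// expected ETag is the section's MD5 hex digest wrapped in double quotes;
// any mismatch falls through to the resend below.)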
- part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -// See http://goo.gl/2Z7Tw for details. -func (m *Multi) Complete(parts []Part) error { - - var c completeUpload - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - - data, err := xml.Marshal(&c) - if err != nil { - return err - } - body := bytes.NewReader(data) - - req, err := http.NewRequest( - "POST", - m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), - body, - ) - if err != nil { - return err - } - req.Close = true - req.ContentLength = int64(len(data)) - req.URL.Path += m.Key - - query := req.URL.Query() - query.Add("uploadId", m.UploadId) - req.URL.RawQuery = query.Encode() - - addAmazonDateHeader(req.Header) - - if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil { - return err - } - // Signing may read the request body. - if _, err := body.Seek(0, 0); err != nil { - return err - } - - resp, err := requestRetryLoop(req, attempts) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return buildError(resp) - } - - return nil -} - -// Abort deletes an unifinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// See http://goo.gl/dnyJw for details. -func (m *Multi) Abort() error { - - req, err := http.NewRequest("DELETE", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil) - if err != nil { - return nil - } - req.URL.Path += m.Key - - query := req.URL.Query() - query.Add("uploadId", m.UploadId) - req.URL.RawQuery = query.Encode() - - addAmazonDateHeader(req.Header) - - if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil { - return err - } - _, err = requestRetryLoop(req, attempts) - return err -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi_test.go deleted file mode 100644 index 44295d9a3..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/multi_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package s3_test - -import ( - "encoding/xml" - "io" - "io/ioutil" - "strings" - - . 
"gopkg.in/check.v1" - - "gopkg.in/amz.v3/s3" -) - -func (s *S) TestInitMulti(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"}) - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - - c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiNoPreviousUpload(c *C) { - // Don't retry the NoSuchUpload error. - s3.RetryAttempts(false) - - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, nil, InitMultiResultDump) - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.Multi("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"}) - - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - - c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiReturnOld(c *C) { - testServer.Response(200, nil, ListMultiResultDump) - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.Multi("multi1", "text/plain", s3.Private) - c.Assert(err, IsNil) - c.Assert(multi.Key, Equals, "multi1") - c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"}) -} - -func (s *S) TestListParts(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(404, nil, NoSuchUploadErrorDump) // :-( - testServer.Response(200, nil, ListPartsResultDump2) - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - parts, err := multi.ListParts() - c.Assert(err, IsNil) - c.Assert(parts, HasLen, 3) - c.Assert(parts[0].N, Equals, 1) - c.Assert(parts[0].Size, Equals, int64(5)) - c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, Equals, 2) - c.Assert(parts[1].Size, Equals, int64(5)) - c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`) - c.Assert(parts[2].N, Equals, 3) - c.Assert(parts[2].Size, Equals, int64(5)) - c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) - - testServer.WaitRequest() // The internal error. 
- req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) - c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"}) -} - -func (s *S) TestPutPart(c *C) { - headers := map[string]string{ - "ETag": `"26f90efd10d614f100252ff56d88dad8"`, - } - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, headers, "") - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - part, err := multi.PutPart(1, strings.NewReader("")) - c.Assert(err, IsNil) - c.Assert(part.N, Equals, 1) - c.Assert(part.Size, Equals, int64(8)) - c.Assert(part.ETag, Equals, headers["ETag"]) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"}) - c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="}) -} - -func readAll(r io.Reader) string { - data, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - return string(data) -} - -func (s *S) TestPutAllNoPreviousUpload(c *C) { - // Don't retry the NoSuchUpload error. - s3.RetryAttempts(false) - - etag1 := map[string]string{"ETag": `"etag1"`} - etag2 := map[string]string{"ETag": `"etag2"`} - etag3 := map[string]string{"ETag": `"etag3"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - testServer.Response(200, etag2, "") - testServer.Response(200, etag3, "") - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) - c.Assert(err, IsNil) - c.Assert(parts, HasLen, 3) - c.Check(parts[0].ETag, Equals, `"etag1"`) - c.Check(parts[1].ETag, Equals, `"etag2"`) - c.Check(parts[2].ETag, Equals, `"etag3"`) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - - // Send part 1. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "part1") - - // Send part 2. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "part2") - - // Send part 3 with shorter body. 
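// (Worked sizes: the body "part1part2last" is 14 bytes and partSize is 5,
// so PutAll cuts it into parts of 5, 5 and 4 bytes; the short final part is
// fine because only non-final parts carry the 5MB minimum.)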
- req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"}) - c.Assert(readAll(req.Body), Equals, "last") -} - -func (s *S) TestPutAllZeroSizeFile(c *C) { - // Don't retry the NoSuchUpload error. - s3.RetryAttempts(false) - - etag1 := map[string]string{"ETag": `"etag1"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - // Must send at least one part, so that completing it will work. - parts, err := multi.PutAll(strings.NewReader(""), 5) - c.Assert(parts, HasLen, 1) - c.Assert(parts[0].ETag, Equals, `"etag1"`) - c.Assert(err, IsNil) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - - // Send empty part. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"}) - c.Assert(readAll(req.Body), Equals, "") -} - -func (s *S) TestPutAllResume(c *C) { - etag2 := map[string]string{"ETag": `"etag2"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(200, nil, ListPartsResultDump2) - testServer.Response(200, etag2, "") - - b, err := s.s3.Bucket("sample") - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - // "part1" and "part3" match the checksums in ResultDump1. - // The middle one is a mismatch (it refers to "part2"). - parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5) - c.Assert(parts, HasLen, 3) - c.Assert(parts[0].N, Equals, 1) - c.Assert(parts[0].Size, Equals, int64(5)) - c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, Equals, 2) - c.Assert(parts[1].Size, Equals, int64(5)) - c.Assert(parts[1].ETag, Equals, `"etag2"`) - c.Assert(parts[2].N, Equals, 3) - c.Assert(parts[2].Size, Equals, int64(5)) - c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) - c.Assert(err, IsNil) - - // Init - testServer.WaitRequest() - - // List old parts, broken in two requests. - for i := 0; i < 2; i++ { - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - } - - // Send part 2, as it didn't match the checksum. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "partX") -} - -func (s *S) TestMultiComplete(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - // Note the 200 response. Completing will hold the connection on some - // kind of long poll, and may return a late error even after a 200. 
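// (Background for the queued responses below: CompleteMultipartUpload can
// return an HTTP 200 whose body is nevertheless an <Error> document, which
// is what the InternalErrorDump response simulates before the clean 200.)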
-    testServer.Response(200, nil, InternalErrorDump)
-    testServer.Response(200, nil, "")
-
-    b, err := s.s3.Bucket("sample")
-    c.Assert(err, IsNil)
-
-    multi, err := b.InitMulti("multi", "text/plain", s3.Private)
-    c.Assert(err, IsNil)
-
-    err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
-    c.Assert(err, IsNil)
-
-    // Grab the 2nd request.
-    req := testServer.WaitRequests(2)[1]
-
-    c.Assert(req.Method, Equals, "POST")
-    c.Assert(req.URL.Path, Equals, "/sample/multi")
-    c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-
-    var payload struct {
-        XMLName xml.Name
-        Part    []struct {
-            PartNumber int
-            ETag       string
-        }
-    }
-
-    err = xml.NewDecoder(req.Body).Decode(&payload)
-    c.Assert(err, IsNil)
-
-    c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
-    c.Assert(len(payload.Part), Equals, 2)
-    c.Assert(payload.Part[0].PartNumber, Equals, 1)
-    c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
-    c.Assert(payload.Part[1].PartNumber, Equals, 2)
-    c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
-}
-
-func (s *S) TestMultiAbort(c *C) {
-    testServer.Response(200, nil, InitMultiResultDump)
-    testServer.Response(200, nil, "")
-
-    b, err := s.s3.Bucket("sample")
-    c.Assert(err, IsNil)
-
-    multi, err := b.InitMulti("multi", "text/plain", s3.Private)
-    c.Assert(err, IsNil)
-
-    err = multi.Abort()
-    c.Assert(err, IsNil)
-
-    testServer.WaitRequest()
-    req := testServer.WaitRequest()
-    c.Assert(req.Method, Equals, "DELETE")
-    c.Assert(req.URL.Path, Equals, "/sample/multi")
-    c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-}
-
-func (s *S) TestListMulti(c *C) {
-    testServer.Response(200, nil, ListMultiResultDump)
-
-    b, err := s.s3.Bucket("sample")
-    c.Assert(err, IsNil)
-
-    multis, prefixes, err := b.ListMulti("", "/")
-    c.Assert(err, IsNil)
-    c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
-    c.Assert(multis, HasLen, 2)
-    c.Assert(multis[0].Key, Equals, "multi1")
-    c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
-    c.Assert(multis[1].Key, Equals, "multi2")
-    c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
-
-    req := testServer.WaitRequest()
-    c.Assert(req.Method, Equals, "GET")
-    c.Assert(req.URL.Path, Equals, "/sample/")
-    c.Assert(req.Form["uploads"], DeepEquals, []string{""})
-    c.Assert(req.Form["prefix"], DeepEquals, []string{""})
-    c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
-    c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
-}
diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/responses_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/responses_test.go
deleted file mode 100644
index 7ceaba81b..000000000
--- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/responses_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package s3_test
-
-var GetObjectErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
-<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
-<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId>
-</Error>
-`
-
-var GetListResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
-  <Name>quotes</Name>
-  <Prefix>N</Prefix>
-  <IsTruncated>false</IsTruncated>
-  <Contents>
-    <Key>Nelson</Key>
-    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
-    <ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
-    <Size>5</Size>
-    <StorageClass>STANDARD</StorageClass>
-    <Owner>
-      <ID>bcaf161ca5fb16fd081034f</ID>
-      <DisplayName>webfile</DisplayName>
-    </Owner>
-  </Contents>
-  <Contents>
-    <Key>Neo</Key>
-    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
-    <ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
-    <Size>4</Size>
-    <StorageClass>STANDARD</StorageClass>
-    <Owner>
-      <ID>bcaf1ffd86a5fb16fd081034f</ID>
-      <DisplayName>webfile</DisplayName>
-    </Owner>
-  </Contents>
-</ListBucketResult>
-`
-
-var GetListResultDump2 = `
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
-  <Name>example-bucket</Name>
-  <Prefix>photos/2006/</Prefix>
-  <Marker>some-marker</Marker>
-  <MaxKeys>1000</MaxKeys>
-  <Delimiter>/</Delimiter>
-  <IsTruncated>false</IsTruncated>
-
-  <CommonPrefixes>
-    <Prefix>photos/2006/feb/</Prefix>
-  </CommonPrefixes>
-  <CommonPrefixes>
-    <Prefix>photos/2006/jan/</Prefix>
-  </CommonPrefixes>
-</ListBucketResult>
-`
-
-var InitMultiResultDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-</InitiateMultipartUploadResult>
-`
-
-var ListPartsResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-  <Initiator>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Initiator>
-  <Owner>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Owner>
-  <StorageClass>STANDARD</StorageClass>
-  <PartNumberMarker>0</PartNumberMarker>
-  <NextPartNumberMarker>2</NextPartNumberMarker>
-  <MaxParts>2</MaxParts>
-  <IsTruncated>true</IsTruncated>
-  <Part>
-    <PartNumber>1</PartNumber>
-    <LastModified>2013-01-30T13:45:51.000Z</LastModified>
-    <ETag>"ffc88b4ca90a355f8ddba6b2c3b2af5c"</ETag>
-    <Size>5</Size>
-  </Part>
-  <Part>
-    <PartNumber>2</PartNumber>
-    <LastModified>2013-01-30T13:45:52.000Z</LastModified>
-    <ETag>"d067a0fa9dc61a6e7195ca99696b5a89"</ETag>
-    <Size>5</Size>
-  </Part>
-</ListPartsResult>
-`
-
-var ListPartsResultDump2 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-  <Initiator>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Initiator>
-  <Owner>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Owner>
-  <StorageClass>STANDARD</StorageClass>
-  <PartNumberMarker>2</PartNumberMarker>
-  <NextPartNumberMarker>3</NextPartNumberMarker>
-  <MaxParts>2</MaxParts>
-  <IsTruncated>false</IsTruncated>
-  <Part>
-    <PartNumber>3</PartNumber>
-    <LastModified>2013-01-30T13:46:50.000Z</LastModified>
-    <ETag>"49dcd91231f801159e893fb5c6674985"</ETag>
-    <Size>5</Size>
-  </Part>
-</ListPartsResult>
-`
-
-var ListMultiResultDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
-  <KeyMarker></KeyMarker>
-  <UploadIdMarker></UploadIdMarker>
-  <NextKeyMarker>multi1</NextKeyMarker>
-  <NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
-  <Delimiter>/</Delimiter>
-  <MaxUploads>1000</MaxUploads>
-  <IsTruncated>false</IsTruncated>
-  <Upload>
-    <Key>multi1</Key>
-    <UploadId>iUVug89pPvSswrikD</UploadId>
-    <Initiator>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>gustavoniemeyer</DisplayName>
-    </Initiator>
-    <Owner>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>gustavoniemeyer</DisplayName>
-    </Owner>
-    <StorageClass>STANDARD</StorageClass>
-    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
-  </Upload>
-  <Upload>
-    <Key>multi2</Key>
-    <UploadId>DkirwsSvPp98guVUi</UploadId>
-    <Initiator>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>joe</DisplayName>
-    </Initiator>
-    <Owner>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>joe</DisplayName>
-    </Owner>
-    <StorageClass>STANDARD</StorageClass>
-    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
-  </Upload>
-  <CommonPrefixes>
-    <Prefix>a/</Prefix>
-  </CommonPrefixes>
-  <CommonPrefixes>
-    <Prefix>b/</Prefix>
-  </CommonPrefixes>
-</ListMultipartUploadsResult>
-`
-
-var NoSuchUploadErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-  <Code>NoSuchUpload</Code>
-  <Message>Not relevant</Message>
-  <BucketName>sample</BucketName>
-  <RequestId>3F1B667FAD71C3D8</RequestId>
-  <HostId>kjhwqk</HostId>
-</Error>
-`
-
-var InternalErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-  <Code>InternalError</Code>
-  <Message>Not relevant</Message>
-  <BucketName>sample</BucketName>
-  <RequestId>3F1B667FAD71C3D8</RequestId>
-  <HostId>kjhwqk</HostId>
-</Error>
-`
diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3.go
deleted file mode 100644
index 3f3472477..000000000
--- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3.go
+++ /dev/null
@@ -1,566 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-
-package s3
-
-import (
-    "bytes"
-    "encoding/xml"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "log"
-    "net"
-    "net/http"
-    "net/url"
-    "strconv"
-    "strings"
-    "time"
-
-    "gopkg.in/amz.v3/aws"
-)
-
-const debug = false
-
-// The S3 type encapsulates operations with an S3 region.
-type S3 struct {
-    aws.Auth
-    aws.Region
-    Sign    aws.Signer
-    private byte // Reserve the right of using private data.
-}
-
-// The Bucket type encapsulates operations with an S3 bucket.
-type Bucket struct {
-    *S3
-    Name string
-}
-
-// The Owner type represents the owner of the object in an S3 bucket.
-type Owner struct {
-    ID          string
-    DisplayName string
-}
-
-var (
-    attempts        = defaultAttempts
-    defaultAttempts = aws.AttemptStrategy{
-        Min:   5,
-        Total: 5 * time.Second,
-        Delay: 200 * time.Millisecond,
-    }
-)
-
-// RetryAttempts sets whether failing S3 requests may be retried to cope
-// with eventual consistency or temporary failures. It should not be
-// called while operations are in progress.
-func RetryAttempts(retry bool) {
-    if retry {
-        attempts = defaultAttempts
-    } else {
-        attempts = aws.AttemptStrategy{}
-    }
-}
-
-// New creates a new S3.
-func New(auth aws.Auth, region aws.Region) *S3 {
-    return &S3{auth, region, aws.SignV4Factory(region.Name, "s3"), 0}
-}
-
-// Bucket returns a Bucket with the given name.
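// A hedged construction sketch for this package (the region value is
// illustrative): credentials plus a region yield an *S3 whose requests are
// signed with V4, and buckets hang off that handle:
//
//     auth := aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "secret"}
//     client := New(auth, aws.USEast) // V4 signer via aws.SignV4Factory(region, "s3")
//     b, err := client.Bucket("my-bucket") // names containing '/', ':' or '@' are rejected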
-func (s3 *S3) Bucket(name string) (*Bucket, error) {
-    if strings.IndexAny(name, "/:@") >= 0 {
-        return nil, fmt.Errorf("bad S3 bucket: %q", name)
-    }
-    if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
-        name = strings.ToLower(name)
-    }
-    return &Bucket{s3, name}, nil
-}
-
-var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <LocationConstraint>%s</LocationConstraint>
-</CreateBucketConfiguration>`
-
-// locationConstraint returns a *strings.Reader specifying a
-// LocationConstraint if required for the region.
-//
-// See http://goo.gl/bh9Kq for details.
-func (s3 *S3) locationConstraint() *strings.Reader {
-    constraint := ""
-    if s3.Region.S3LocationConstraint {
-        constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
-    }
-    return strings.NewReader(constraint)
-}
-
-type ACL string
-
-const (
-    Private           = ACL("private")
-    PublicRead        = ACL("public-read")
-    PublicReadWrite   = ACL("public-read-write")
-    AuthenticatedRead = ACL("authenticated-read")
-    BucketOwnerRead   = ACL("bucket-owner-read")
-    BucketOwnerFull   = ACL("bucket-owner-full-control")
-)
-
-// Put inserts an object into the S3 bucket.
-//
-// See http://goo.gl/FEBPD for details.
-func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
-    body := bytes.NewReader(data)
-    return b.PutReader(path, body, int64(len(data)), contType, perm)
-}
-
-// PutBucket creates a new bucket.
-//
-// See http://goo.gl/ndjnR for details.
-func (b *Bucket) PutBucket(perm ACL) error {
-    body := b.locationConstraint()
-    req, err := http.NewRequest("PUT", b.ResolveS3BucketEndpoint(b.Name), body)
-    if err != nil {
-        return err
-    }
-    req.Close = true
-
-    addAmazonDateHeader(req.Header)
-    req.Header.Add("x-amz-acl", string(perm))
-
-    if err := b.S3.Sign(req, b.Auth); err != nil {
-        return err
-    }
-    // Signing may read the request body.
-    if _, err := body.Seek(0, 0); err != nil {
-        return err
-    }
-
-    _, err = http.DefaultClient.Do(req)
-    return err
-}
-
-// DelBucket removes an existing S3 bucket. All objects in the bucket must
-// be removed before the bucket itself can be removed.
-//
-// See http://goo.gl/GoBrY for details.
-func (b *Bucket) DelBucket() (err error) {
-
-    req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
-    if err != nil {
-        return err
-    }
-    req.Close = true
-    addAmazonDateHeader(req.Header)
-
-    if err := b.S3.Sign(req, b.Auth); err != nil {
-        return err
-    }
-    resp, err := requestRetryLoop(req, attempts)
-    if err != nil {
-        return err
-    }
-    resp.Body.Close()
-    return nil
-}
-
-// Get retrieves an object from an S3 bucket.
-//
-// See http://goo.gl/isCO7 for details.
-func (b *Bucket) Get(path string) (data []byte, err error) {
-    body, err := b.GetReader(path)
-    if err != nil {
-        return nil, err
-    }
-    defer body.Close()
-
-    return ioutil.ReadAll(body)
-}
-
-// GetReader retrieves an object from an S3 bucket. It is the caller's
-// responsibility to call Close on rc when finished reading.
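// For example (a sketch, assuming a configured *Bucket b; the key is
// illustrative):
//
//     rc, err := b.GetReader("docs/report.pdf")
//     if err != nil { /* handle */ }
//     defer rc.Close() // the caller owns the body
//     data, err := ioutil.ReadAll(rc)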
-func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - - req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil) - if err != nil { - return nil, err - } - req.Close = true - req.URL.Path += path - - addAmazonDateHeader(req.Header) - - if err := b.S3.Sign(req, b.Auth); err != nil { - return nil, err - } - - resp, err := requestRetryLoop(req, attempts) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return nil, buildError(resp) - } - return resp.Body, nil -} - -// PutReader inserts an object into the S3 bucket by consuming data -// from r until EOF. Passing in an io.ReadSeeker for r will optimize -// the memory usage. -func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error { - return b.PutReaderWithHeader(path, r, length, contType, perm, http.Header{}) -} - -// PutReaderWithHeader inserts an object into the S3 bucket by -// consuming data from r until EOF. It also adds the headers provided -// to the request. Passing in an io.ReadSeeker for r will optimize the -// memory usage. -func (b *Bucket) PutReaderWithHeader(path string, r io.Reader, length int64, contType string, perm ACL, hdrs http.Header) error { - - // Convert the reader to a ReadSeeker so we can seek after - // signing. - seeker, ok := r.(io.ReadSeeker) - if !ok { - content, err := ioutil.ReadAll(r) - if err != nil { - return err - } - seeker = bytes.NewReader(content) - } - - req, err := http.NewRequest("PUT", b.Region.ResolveS3BucketEndpoint(b.Name), seeker) - if err != nil { - return err - } - req.Header = hdrs - req.Close = true - req.URL.Path += path - req.ContentLength = length - - req.Header.Add("Content-Type", contType) - req.Header.Add("x-amz-acl", string(perm)) - addAmazonDateHeader(req.Header) - - // Determine the current offset. - const seekFromPos = 1 - prevPos, err := seeker.Seek(0, seekFromPos) - if err != nil { - return err - } - - if err := b.S3.Sign(req, b.Auth); err != nil { - return err - } - // Signing may read the request body. - if _, err := seeker.Seek(prevPos, 0); err != nil { - return err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return buildError(resp) // closes body - } - - resp.Body.Close() - return nil -} - -// Del removes an object from the S3 bucket. -// -// See http://goo.gl/APeTt for details. -func (b *Bucket) Del(path string) error { - - req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil) - if err != nil { - return err - } - req.Close = true - req.URL.Path += path - - addAmazonDateHeader(req.Header) - - if err := b.S3.Sign(req, b.Auth); err != nil { - return err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - NextMarker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` -} - -// The Key type represents an item stored in an S3 bucket. 
-type Key struct { - Key string - LastModified string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an S3 bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. Amazon S3 lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. -// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// See http://goo.gl/YjQTc for details. -func (b *Bucket) List(prefix, delim, marker string, max int) (*ListResp, error) { - - req, err := http.NewRequest("GET", b.ResolveS3BucketEndpoint(b.Name), nil) - if err != nil { - return nil, err - } - req.Close = true - - query := req.URL.Query() - query.Add("prefix", prefix) - query.Add("delimiter", delim) - query.Add("marker", marker) - if max != 0 { - query.Add("max-keys", strconv.FormatInt(int64(max), 10)) - } - req.URL.RawQuery = query.Encode() - - addAmazonDateHeader(req.Header) - - if err := b.S3.Sign(req, b.Auth); err != nil { - return nil, err - } - - resp, err := requestRetryLoop(req, attempts) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return nil, buildError(resp) // closes body - } - - var result ListResp - err = xml.NewDecoder(resp.Body).Decode(&result) - resp.Body.Close() - return &result, nil -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - return b.ResolveS3BucketEndpoint(b.Name) + path -} - -// SignedURL returns a URL which can be used to fetch objects without -// signing for the given duration. 
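// For example (a sketch; the key and lifetime are illustrative):
//
//     url, err := b.SignedURL("private/report.csv", 15*time.Minute)
//     if err != nil { /* handle */ }
//     resp, err := http.Get(url) // no credentials needed until the URL expires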
-func (b *Bucket) SignedURL(path string, expires time.Duration) (string, error) { - req, err := http.NewRequest("GET", b.URL(path), nil) - if err != nil { - return "", err - } - req.Header.Add("date", time.Now().Format(aws.ISO8601BasicFormat)) - - if err := aws.SignV4URL(req, b.Auth, b.Region.Name, "s3", expires); err != nil { - return "", err - } - return req.URL.String(), nil -} - -type request struct { - method string - bucket string - path string - signpath string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool -} - -func (req *request) url() (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) - } - u.RawQuery = req.params.Encode() - u.Path = req.path - return u, nil -} - -// Error represents an error in an operation with S3. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // EC2 error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return e.Message -} - -func buildError(r *http.Response) error { - if debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*Error) - return ok && s3err.Code == code -} - -// requestRetryLoop attempts to send the request until the given -// strategy says to stop. -func requestRetryLoop(req *http.Request, retryStrat aws.AttemptStrategy) (*http.Response, error) { - - for attempt := attempts.Start(); attempt.Next(); { - - if debug { - log.Printf("Full URL (in loop): %v", req.URL) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - if shouldRetry(err) && attempt.HasNext() { - continue - } - return nil, fmt.Errorf("making request: %v", err) - } - - if debug { - log.Printf("Full response (in loop): %v", resp) - } - - return resp, nil - } - - return nil, fmt.Errorf("could not complete the request within the specified retry attempts") -} - -func addAmazonDateHeader(header http.Header) { - header.Set("x-amz-date", time.Now().In(time.UTC).Format(aws.ISO8601BasicFormat)) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3_test.go deleted file mode 100644 index 7f9b4ad51..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package s3_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - "time" - - . 
"gopkg.in/check.v1" - - "gopkg.in/amz.v3/aws" - "gopkg.in/amz.v3/s3" - "gopkg.in/amz.v3/testutil" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - s3 *s3.S3 -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - s.s3 = s3.New( - aws.Auth{"abc", "123"}, - aws.Region{ - Name: "faux-region-1", - S3Endpoint: testServer.URL, - }, - ) -} - -func (s *S) TearDownSuite(c *C) { - s3.SetAttemptStrategy(nil) - testServer.Stop() -} - -func (s *S) SetUpTest(c *C) { - attempts := aws.AttemptStrategy{ - Total: 300 * time.Millisecond, - Delay: 100 * time.Millisecond, - } - s3.SetAttemptStrategy(&attempts) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -// PutBucket docs: http://goo.gl/kBTCu - -func (s *S) TestPutBucket(c *C) { - testServer.Response(200, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - err = b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -func (s *S) TestURL(c *C) { - testServer.Response(200, nil, "content") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - url := b.URL("name") - r, err := http.Get(url) - c.Assert(err, IsNil) - data, err := ioutil.ReadAll(r.Body) - r.Body.Close() - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") -} - -// DeleteBucket docs: http://goo.gl/GoBrY - -func (s *S) TestDelBucket(c *C) { - testServer.Response(204, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - err = b.DelBucket() - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "DELETE") - c.Assert(req.URL.Path, Equals, "/bucket/") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -// GetObject docs: http://goo.gl/isCO7 - -func (s *S) TestGet(c *C) { - testServer.Response(200, nil, "content") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - data, err := b.Get("name") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") -} - -func (s *S) TestGetReader(c *C) { - testServer.Response(200, nil, "content") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - rc, err := b.GetReader("name") - c.Assert(err, IsNil) - data, err := ioutil.ReadAll(rc) - rc.Close() - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -func (s *S) TestGetNotFound(c *C) { - for i := 0; i < 10; i++ { - testServer.Response(404, nil, GetObjectErrorDump) - } - - b, err := s.s3.Bucket("non-existent-bucket") - c.Assert(err, IsNil) - data, err := b.Get("non-existent") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent") - c.Assert(req.Header["Date"], Not(Equals), "") - - s3err, _ := err.(*s3.Error) - c.Assert(s3err, NotNil) - c.Assert(s3err.StatusCode, Equals, 404) - c.Assert(s3err.BucketName, Equals, "non-existent-bucket") - c.Assert(s3err.RequestId, Equals, 
"3F1B667FAD71C3D8") - c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D") - c.Assert(s3err.Code, Equals, "NoSuchBucket") - c.Assert(s3err.Message, Equals, "The specified bucket does not exist") - c.Assert(s3err.Error(), Equals, "The specified bucket does not exist") - c.Assert(data, IsNil) -} - -// PutObject docs: http://goo.gl/FEBPD - -func (s *S) TestPutObject(c *C) { - testServer.Response(200, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - err = b.Put("name", []byte("content"), "content-type", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], DeepEquals, "...") - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestPutReader(c *C) { - testServer.Response(200, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - buf := bytes.NewReader([]byte("content")) - err = b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], Equals, "...") - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestPutReaderWithHeader(c *C) { - testServer.Response(200, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - buf := bytes.NewReader([]byte("content")) - err = b.PutReaderWithHeader("name", buf, int64(buf.Len()), "content-type", s3.Private, http.Header{ - "Cache-Control": []string{"max-age=5"}, - }) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) - c.Assert(req.Header["Cache-Control"], DeepEquals, []string{"max-age=5"}) -} - -// DelObject docs: http://goo.gl/APeTt - -func (s *S) TestDelObject(c *C) { - testServer.Response(200, nil, "") - - b, err := s.s3.Bucket("bucket") - c.Assert(err, IsNil) - err = b.Del("name") - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "DELETE") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -// Bucket List Objects docs: http://goo.gl/YjQTc - -func (s *S) TestList(c *C) { - testServer.Response(200, nil, GetListResultDump1) - - b, err := s.s3.Bucket("quotes") - c.Assert(err, IsNil) - - data, err := b.List("N", "", "", 0) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/quotes/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["prefix"], DeepEquals, []string{"N"}) - c.Assert(req.Form["delimiter"], 
DeepEquals, []string{""}) - c.Assert(req.Form["marker"], DeepEquals, []string{""}) - c.Assert(req.Form["max-keys"], DeepEquals, []string(nil)) - - c.Assert(data.Name, Equals, "quotes") - c.Assert(data.Prefix, Equals, "N") - c.Assert(data.IsTruncated, Equals, false) - c.Assert(len(data.Contents), Equals, 2) - - c.Assert(data.Contents[0].Key, Equals, "Nelson") - c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[0].Size, Equals, int64(5)) - c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD") - c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f") - c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile") - - c.Assert(data.Contents[1].Key, Equals, "Neo") - c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[1].Size, Equals, int64(4)) - c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD") - c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f") - c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile") -} - -func (s *S) TestListWithDelimiter(c *C) { - testServer.Response(200, nil, GetListResultDump2) - - b, err := s.s3.Bucket("quotes") - c.Assert(err, IsNil) - - data, err := b.List("photos/2006/", "/", "some-marker", 1000) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/quotes/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"}) - c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"}) - c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"}) - c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"}) - - c.Assert(data.Name, Equals, "example-bucket") - c.Assert(data.Prefix, Equals, "photos/2006/") - c.Assert(data.Delimiter, Equals, "/") - c.Assert(data.Marker, Equals, "some-marker") - c.Assert(data.IsTruncated, Equals, false) - c.Assert(len(data.Contents), Equals, 0) - c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"}) -} - -func (s *S) TestRetryAttempts(c *C) { - s3.SetAttemptStrategy(nil) - orig := s3.AttemptStrategy() - s3.RetryAttempts(false) - c.Assert(s3.AttemptStrategy(), Equals, aws.AttemptStrategy{}) - s3.RetryAttempts(true) - c.Assert(s3.AttemptStrategy(), Equals, orig) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3i_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3i_test.go deleted file mode 100644 index 875c7d498..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3i_test.go +++ /dev/null @@ -1,610 +0,0 @@ -package s3_test - -import ( - "bytes" - "crypto/md5" - "fmt" - "io/ioutil" - "net" - "net/http" - "sort" - "strings" - "time" - - . "gopkg.in/check.v1" - - "gopkg.in/amz.v3/aws" - "gopkg.in/amz.v3/s3" - "gopkg.in/amz.v3/testutil" -) - -// AmazonServer represents an Amazon S3 server. -type AmazonServer struct { - auth aws.Auth -} - -func (s *AmazonServer) SetUp(c *C) { - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err.Error()) - } - s.auth = auth -} - -var _ = Suite(&AmazonClientSuite{Region: aws.USEast}) -var _ = Suite(&AmazonClientSuite{Region: aws.EUWest}) -var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast}) - -// AmazonClientSuite tests the client against a live S3 server. 
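// (How the live suites below are gated: testutil.Amazon is only set when the
// tests run with the -amazon flag, and AmazonServer.SetUp pulls credentials
// from the environment via aws.EnvAuth, conventionally AWS_ACCESS_KEY_ID and
// AWS_SECRET_ACCESS_KEY; the exact variable names live in the aws package.)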
-type AmazonClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - s.s3 = s3.New(s.srv.auth, s.Region) - // In case tests were interrupted in the middle before. - s.ClientTests.Cleanup() -} - -func (s *AmazonClientSuite) TearDownTest(c *C) { - s.ClientTests.Cleanup() -} - -// AmazonDomainClientSuite tests the client against a live S3 -// server using bucket names in the endpoint domain name rather -// than the request path. -type AmazonDomainClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonDomainClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - region := s.Region - region.S3BucketEndpoint += "https://s3.amazonaws.com/${bucket}/" - s.s3 = s3.New(s.srv.auth, region) - s.ClientTests.Cleanup() -} - -func (s *AmazonDomainClientSuite) TearDownTest(c *C) { - s.ClientTests.Cleanup() -} - -// ClientTests defines integration tests designed to test the client. -// It is not used as a test suite in itself, but embedded within -// another type. -type ClientTests struct { - s3 *s3.S3 -} - -func (s *ClientTests) Cleanup() { - killBucket(testBucket(s.s3)) -} - -func testBucket(s *s3.S3) *s3.Bucket { - // Watch out! If this function is corrupted and made to match with something - // people own, killBucket will happily remove *everything* inside the bucket. - key := s.Auth.AccessKey - if len(key) >= 8 { - key = s.Auth.AccessKey[:8] - } - b, err := s.Bucket(strings.ToLower(fmt.Sprintf( - "goamz-%s-%s-%s", - s.Region.Name, - key, - // Add in the time element to help isolate tests from one - // another. - time.Now().Format("20060102T150405.999999999"), - ))) - if err != nil { - panic(err) - } - return b -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 20 * time.Second, - Delay: 100 * time.Millisecond, -} - -func killBucket(b *s3.Bucket) { - var err error - for attempt := attempts.Start(); attempt.Next(); { - err = b.DelBucket() - if err == nil { - return - } - if _, ok := err.(*net.DNSError); ok { - return - } - e, ok := err.(*s3.Error) - if ok && e.Code == "NoSuchBucket" { - return - } - if ok && e.Code == "BucketNotEmpty" { - // Errors are ignored here. Just retry. - resp, err := b.List("", "", "", 1000) - if err == nil { - for _, key := range resp.Contents { - _ = b.Del(key.Key) - } - } - multis, _, _ := b.ListMulti("", "") - for _, m := range multis { - _ = m.Abort() - } - } - } - message := "cannot delete test bucket" - if err != nil { - message += ": " + err.Error() - } - panic(message) -} - -func (s *ClientTests) TestSignedUrl(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, IsNil) - s.testSignedUrl(c, b, "name") - // Test that various special characters get escaped properly. 
- s.testSignedUrl(c, b, "&@$=:,!-_.*'( )") -} - -func (s *ClientTests) testSignedUrl(c *C, b *s3.Bucket, name string) { - err := b.Put(name, []byte("test for signed URLs."), "text/plain", s3.Private) - c.Assert(err, IsNil) - defer b.Del(name) - - req, err := http.NewRequest("GET", b.URL(name), nil) - c.Assert(err, IsNil) - resp, err := http.DefaultClient.Do(req) - c.Assert(err, IsNil) - err = s3.BuildError(resp) - c.Check(err, NotNil) - c.Check(err.(*s3.Error).Code, Equals, "AccessDenied") - - url, err := b.SignedURL(name, 24*time.Hour) - c.Assert(err, IsNil) - - req, err = http.NewRequest("GET", url, nil) - c.Assert(err, IsNil) - resp, err = http.DefaultClient.Do(req) - c.Assert(err, IsNil) - - body, err := ioutil.ReadAll(resp.Body) - c.Assert(err, IsNil) - - c.Check(string(body), Equals, "test for signed URLs.") -} - -func (s *ClientTests) TestBasicFunctionality(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, IsNil) - - err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead) - c.Assert(err, IsNil) - defer b.Del("name") - - data, err := b.Get("name") - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "yo!") - - buf := bytes.NewReader([]byte("hey!")) - err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private) - c.Assert(err, IsNil) - defer b.Del("name2") - - rc, err := b.GetReader("name2") - c.Assert(err, IsNil) - data, err = ioutil.ReadAll(rc) - c.Check(err, IsNil) - c.Check(string(data), Equals, "hey!") - rc.Close() - - data, err = b.Get("name2") - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "hey!") - - err = b.Del("name") - c.Assert(err, IsNil) - err = b.Del("name2") - c.Assert(err, IsNil) - - err = b.DelBucket() - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestGetNotFound(c *C) { - b, err := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey) - c.Assert(err, IsNil) - data, err := b.Get("non-existent") - - s3err, _ := err.(*s3.Error) - c.Assert(s3err, NotNil) - c.Assert(s3err.StatusCode, Equals, 404) - c.Assert(s3err.Code, Equals, "NoSuchBucket") - c.Assert(s3err.Message, Equals, "The specified bucket does not exist") - c.Assert(data, IsNil) -} - -// Communicate with all endpoints to see if they are alive. -func (s *ClientTests) TestRegions(c *C) { - type result struct { - aws.Region - error - } - - results := make(chan result, len(aws.Regions)) - for _, region := range aws.Regions { - go func(r aws.Region) { - s := s3.New(s.s3.Auth, r) - b, err := s.Bucket("goamz-" + s.Auth.AccessKey) - if !c.Check(err, IsNil) { - return - } - - _, err = b.Get("non-existent") - if !c.Check(err, NotNil) { - return - } - - results <- result{r, err} - }(region) - } - for _ = range aws.Regions { - result := <-results - if s3_err, ok := result.error.(*s3.Error); ok { - if result.Region == aws.CNNorth && s3_err.Code == "InvalidAccessKeyId" { - c.Log("You must utilize an account specifically for CNNorth.") - continue - } - c.Check(s3_err.Code, Matches, "NoSuchBucket") - } else if _, ok = result.error.(*net.DNSError); ok { - // Okay as well. 
- } else { - c.Errorf("Non-S3 error: %s", result.error) - } - } -} - -var objectNames = []string{ - "index.html", - "index2.html", - "photos/2006/February/sample2.jpg", - "photos/2006/February/sample3.jpg", - "photos/2006/February/sample4.jpg", - "photos/2006/January/sample.jpg", - "test/bar", - "test/foo", -} - -func keys(names ...string) []s3.Key { - ks := make([]s3.Key, len(names)) - for i, name := range names { - ks[i].Key = name - } - return ks -} - -// As the ListResp specifies all the parameters to the -// request too, we use it to specify request parameters -// and expected results. The Contents field is -// used only for the key names inside it. -var listTests = []s3.ListResp{ - // normal list. - { - Contents: keys(objectNames...), - }, { - Marker: objectNames[0], - Contents: keys(objectNames[1:]...), - }, { - Marker: objectNames[0] + "a", - Contents: keys(objectNames[1:]...), - }, { - Marker: "z", - }, - - // limited results. - { - MaxKeys: 2, - Contents: keys(objectNames[0:2]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[0], - Contents: keys(objectNames[1:3]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[len(objectNames)-2], - Contents: keys(objectNames[len(objectNames)-1:]...), - }, - - // with delimiter - { - Delimiter: "/", - CommonPrefixes: []string{"photos/", "test/"}, - Contents: keys("index.html", "index2.html"), - }, { - Delimiter: "/", - Prefix: "photos/2006/", - CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"}, - }, { - Delimiter: "/", - Prefix: "t", - CommonPrefixes: []string{"test/"}, - }, { - Delimiter: "/", - MaxKeys: 1, - Contents: keys("index.html"), - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "index2.html", - CommonPrefixes: []string{"photos/"}, - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "photos/", - CommonPrefixes: []string{"test/"}, - IsTruncated: false, - }, { - Delimiter: "Feb", - CommonPrefixes: []string{"photos/2006/Feb"}, - Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"), - }, -} - -func (s *ClientTests) TestDoublePutBucket(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, IsNil) - - err = b.PutBucket(s3.PublicRead) - if err != nil { - c.Assert(err, FitsTypeOf, new(s3.Error)) - c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou") - } -} - -func (s *ClientTests) TestBucketList(c *C) { - b := testBucket(s.s3) - defer b.DelBucket() - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - objData := make(map[string][]byte) - for i, path := range objectNames { - data := []byte(strings.Repeat("a", i)) - err := b.Put(path, data, "text/plain", s3.Private) - c.Assert(err, IsNil) - defer b.Del(path) - objData[path] = data - } - - for i, t := range listTests { - c.Logf("test %d", i) - resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys) - c.Assert(err, IsNil) - c.Check(resp.Name, Equals, b.Name) - c.Check(resp.Delimiter, Equals, t.Delimiter) - c.Check(resp.IsTruncated, Equals, t.IsTruncated) - c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes) - checkContents(c, resp.Contents, objData, t.Contents) - } -} - -func etag(data []byte) string { - sum := md5.New() - sum.Write(data) - return fmt.Sprintf(`"%x"`, sum.Sum(nil)) -} - -func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) { - c.Assert(contents, HasLen, len(expected)) - for i, k := range contents { - c.Check(k.Key, Equals, expected[i].Key) 
-		// TODO mtime
-		c.Check(k.Size, Equals, int64(len(data[k.Key])))
-		c.Check(k.ETag, Equals, etag(data[k.Key]))
-	}
-}
-
-func (s *ClientTests) TestMultiInitPutList(c *C) {
-	b := testBucket(s.s3)
-	err := b.PutBucket(s3.Private)
-	c.Assert(err, IsNil)
-
-	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
-	c.Assert(err, IsNil)
-	c.Assert(multi.UploadId, Matches, ".+")
-	defer multi.Abort()
-
-	var sent []s3.Part
-
-	for i := 0; i < 5; i++ {
-		p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
-		c.Assert(err, IsNil)
-		c.Assert(p.N, Equals, i+1)
-		c.Assert(p.Size, Equals, int64(8))
-		c.Assert(p.ETag, Matches, ".+")
-		sent = append(sent, p)
-	}
-
-	s3.SetListPartsMax(2)
-
-	parts, err := multi.ListParts()
-	c.Assert(err, IsNil)
-	c.Assert(parts, HasLen, len(sent))
-	for i := range parts {
-		c.Assert(parts[i].N, Equals, sent[i].N)
-		c.Assert(parts[i].Size, Equals, sent[i].Size)
-		c.Assert(parts[i].ETag, Equals, sent[i].ETag)
-	}
-
-	err = multi.Complete(parts)
-	s3err, failed := err.(*s3.Error)
-	c.Assert(failed, Equals, true)
-	c.Assert(s3err.Code, Equals, "EntityTooSmall")
-
-	err = multi.Abort()
-	c.Assert(err, IsNil)
-	_, err = multi.ListParts()
-	s3err, ok := err.(*s3.Error)
-	c.Assert(ok, Equals, true)
-	c.Assert(s3err.Code, Equals, "NoSuchUpload")
-}
-
-// This may take a minute or more due to the minimum size accepted by S3
-// on multipart upload parts.
-func (s *ClientTests) TestMultiComplete(c *C) {
-	b := testBucket(s.s3)
-	err := b.PutBucket(s3.Private)
-	c.Assert(err, IsNil)
-
-	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
-	c.Assert(err, IsNil)
-	c.Assert(multi.UploadId, Matches, ".+")
-	defer multi.Abort()
-
-	// Minimum size S3 accepts for all but the last part is 5MB.
-	data1 := make([]byte, 5*1024*1024)
-	data2 := []byte("<part 2>")
-
-	part1, err := multi.PutPart(1, bytes.NewReader(data1))
-	c.Assert(err, IsNil)
-	part2, err := multi.PutPart(2, bytes.NewReader(data2))
-	c.Assert(err, IsNil)
-
-	// Purposefully reversed. The order requirement must be handled.
-	err = multi.Complete([]s3.Part{part2, part1})
-	c.Assert(err, IsNil)
-
-	data, err := b.Get("multi")
-	c.Assert(err, IsNil)
-
-	c.Assert(len(data), Equals, len(data1)+len(data2))
-	for i := range data1 {
-		if data[i] != data1[i] {
-			c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
-		}
-	}
-	c.Assert(string(data[len(data1):]), Equals, string(data2))
-}
-
-type multiList []*s3.Multi
-
-func (l multiList) Len() int           { return len(l) }
-func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
-func (l multiList) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
-
-func (s *ClientTests) TestListMulti(c *C) {
-	b := testBucket(s.s3)
-	err := b.PutBucket(s3.Private)
-	c.Assert(err, IsNil)
-
-	// Ensure an empty state before testing its behavior.
-	multis, _, err := b.ListMulti("", "")
-	for _, m := range multis {
-		err := m.Abort()
-		c.Assert(err, IsNil)
-	}
-
-	keys := []string{
-		"a/multi2",
-		"a/multi3",
-		"b/multi4",
-		"multi1",
-	}
-	for _, key := range keys {
-		m, err := b.InitMulti(key, "", s3.Private)
-		c.Assert(err, IsNil)
-		defer m.Abort()
-	}
-
-	// Amazon's implementation of the multiple-request listing for
-	// multipart uploads in progress seems broken in multiple ways.
-	// (next tokens are not provided, etc).
-	//s3.SetListMultiMax(2)
-
-	multis, prefixes, err := b.ListMulti("", "")
-	c.Assert(err, IsNil)
-	for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
-		multis, prefixes, err = b.ListMulti("", "")
-		c.Assert(err, IsNil)
-	}
-	sort.Sort(multiList(multis))
-	c.Assert(prefixes, IsNil)
-	var gotKeys []string
-	for _, m := range multis {
-		gotKeys = append(gotKeys, m.Key)
-	}
-	c.Assert(gotKeys, DeepEquals, keys)
-	for _, m := range multis {
-		c.Assert(m.Bucket, Equals, b)
-		c.Assert(m.UploadId, Matches, ".+")
-	}
-
-	multis, prefixes, err = b.ListMulti("", "/")
-	for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
-		multis, prefixes, err = b.ListMulti("", "/")
-		c.Assert(err, IsNil)
-	}
-	c.Assert(err, IsNil)
-	c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
-	c.Assert(multis, HasLen, 1)
-	c.Assert(multis[0].Bucket, Equals, b)
-	c.Assert(multis[0].Key, Equals, "multi1")
-	c.Assert(multis[0].UploadId, Matches, ".+")
-
-	for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
-		multis, prefixes, err = b.ListMulti("a/", "/")
-		c.Assert(err, IsNil)
-	}
-	multis, prefixes, err = b.ListMulti("a/", "/")
-	c.Assert(err, IsNil)
-	c.Assert(prefixes, IsNil)
-	c.Assert(multis, HasLen, 2)
-	c.Assert(multis[0].Bucket, Equals, b)
-	c.Assert(multis[0].Key, Equals, "a/multi2")
-	c.Assert(multis[0].UploadId, Matches, ".+")
-	c.Assert(multis[1].Bucket, Equals, b)
-	c.Assert(multis[1].Key, Equals, "a/multi3")
-	c.Assert(multis[1].UploadId, Matches, ".+")
-}
-
-func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
-	b := testBucket(s.s3)
-	err := b.PutBucket(s3.Private)
-	c.Assert(err, IsNil)
-
-	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
-	c.Assert(err, IsNil)
-	defer multi.Abort()
-
-	// This tests an edge case. Amazon requires at least one
-	// part for multipart uploads to work, even if the part is empty.
-	parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
-	c.Assert(err, IsNil)
-	c.Assert(parts, HasLen, 1)
-	c.Assert(parts[0].Size, Equals, int64(0))
-	c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
-
-	err = multi.Complete(parts)
-	c.Assert(err, IsNil)
-}
diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3t_test.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3t_test.go
deleted file mode 100644
index 37a08ba40..000000000
--- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3t_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package s3_test
-
-import (
-	. "gopkg.in/check.v1"
-
-	"gopkg.in/amz.v3/aws"
-	"gopkg.in/amz.v3/s3"
-	"gopkg.in/amz.v3/s3/s3test"
-)
-
-type LocalServer struct {
-	auth   aws.Auth
-	region aws.Region
-	srv    *s3test.Server
-	config *s3test.Config
-}
-
-func (s *LocalServer) SetUp(c *C) {
-	srv, err := s3test.NewServer(s.config)
-	c.Assert(err, IsNil)
-	c.Assert(srv, NotNil)
-
-	s.srv = srv
-	s.region = aws.Region{
-		Name:                 "faux-region-1",
-		S3Endpoint:           srv.URL(),
-		S3LocationConstraint: true, // s3test server requires a LocationConstraint
-	}
-}
-
-// LocalServerSuite defines tests that will run
-// against the local s3test server. It includes
-// selected tests from ClientTests;
-// when the s3test functionality is sufficient, it should
-// include all of them, and ClientTests can be simply embedded.
-type LocalServerSuite struct {
-	srv         LocalServer
-	clientTests ClientTests
-}
-
-var (
-	// run tests twice, once in us-east-1 mode, once not.
- _ = Suite(&LocalServerSuite{}) - _ = Suite(&LocalServerSuite{ - srv: LocalServer{ - config: &s3test.Config{ - Send409Conflict: true, - }, - }, - }) -) - -func (s *LocalServerSuite) SetUpSuite(c *C) { - s.srv.SetUp(c) - s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region) - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TearDownTest(c *C) { - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TestBasicFunctionality(c *C) { - s.clientTests.TestBasicFunctionality(c) -} - -func (s *LocalServerSuite) TestGetNotFound(c *C) { - s.clientTests.TestGetNotFound(c) -} - -func (s *LocalServerSuite) TestBucketList(c *C) { - s.clientTests.TestBucketList(c) -} - -func (s *LocalServerSuite) TestDoublePutBucket(c *C) { - s.clientTests.TestDoublePutBucket(c) -} diff --git a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3test/server.go b/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3test/server.go deleted file mode 100644 index 5080c8df6..000000000 --- a/Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3test/server.go +++ /dev/null @@ -1,629 +0,0 @@ -package s3test - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "time" - - "gopkg.in/amz.v3/s3" -) - -const debug = false - -type s3Error struct { - statusCode int - XMLName struct{} `xml:"Error"` - Code string - Message string - BucketName string - RequestId string - HostId string -} - -type action struct { - srv *Server - w http.ResponseWriter - req *http.Request - reqId string -} - -// Config controls the internal behaviour of the Server. A nil config is the default -// and behaves as if all configurations assume their default behaviour. Once passed -// to NewServer, the configuration must not be modified. -type Config struct { - // Send409Conflict controls how the Server will respond to calls to PUT on a - // previously existing bucket. The default is false, and corresponds to the - // us-east-1 s3 enpoint. Setting this value to true emulates the behaviour of - // all other regions. - // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html - Send409Conflict bool -} - -func (c *Config) send409Conflict() bool { - if c != nil { - return c.Send409Conflict - } - return false -} - -// Server is a fake S3 server for testing purposes. -// All of the data for the server is kept in memory. -type Server struct { - url string - reqId int - listener net.Listener - mu sync.Mutex - buckets map[string]*bucket - config *Config -} - -type bucket struct { - name string - acl s3.ACL - ctime time.Time - objects map[string]*object -} - -type object struct { - name string - mtime time.Time - meta http.Header // metadata to return with requests. - checksum []byte // also held as Content-MD5 in meta. - data []byte -} - -// A resource encapsulates the subject of an HTTP request. -// The resource referred to may or may not exist -// when the request is made. 
-type resource interface { - put(a *action) interface{} - get(a *action) interface{} - post(a *action) interface{} - delete(a *action) interface{} -} - -func NewServer(config *Config) (*Server, error) { - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("cannot listen on localhost: %v", err) - } - srv := &Server{ - listener: l, - url: "http://" + l.Addr().String(), - buckets: make(map[string]*bucket), - config: config, - } - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - srv.serveHTTP(w, req) - })) - return srv, nil -} - -// Quit closes down the server. -func (srv *Server) Quit() { - srv.listener.Close() -} - -// URL returns a URL for the server. -func (srv *Server) URL() string { - return srv.url -} - -func fatalf(code int, codeStr string, errf string, a ...interface{}) { - panic(&s3Error{ - statusCode: code, - Code: codeStr, - Message: fmt.Sprintf(errf, a...), - }) -} - -// serveHTTP serves the S3 protocol. -func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { - // ignore error from ParseForm as it's usually spurious. - req.ParseForm() - - srv.mu.Lock() - defer srv.mu.Unlock() - - if debug { - log.Printf("s3test %q %q", req.Method, req.URL) - } - a := &action{ - srv: srv, - w: w, - req: req, - reqId: fmt.Sprintf("%09X", srv.reqId), - } - srv.reqId++ - - var r resource - defer func() { - switch err := recover().(type) { - case *s3Error: - switch r := r.(type) { - case objectResource: - err.BucketName = r.bucket.name - case bucketResource: - err.BucketName = r.name - } - err.RequestId = a.reqId - // TODO HostId - w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) - w.WriteHeader(err.statusCode) - xmlMarshal(w, err) - case nil: - default: - panic(err) - } - }() - - r = srv.resourceForURL(req.URL) - - var resp interface{} - switch req.Method { - case "PUT": - resp = r.put(a) - case "GET", "HEAD": - resp = r.get(a) - case "DELETE": - resp = r.delete(a) - case "POST": - resp = r.post(a) - default: - fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) - } - if resp != nil && req.Method != "HEAD" { - xmlMarshal(w, resp) - } -} - -// xmlMarshal is the same as xml.Marshal except that -// it panics on error. The marshalling should not fail, -// but we want to know if it does. -func xmlMarshal(w io.Writer, x interface{}) { - if err := xml.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -// In a fully implemented test server, each of these would have -// its own resource type. -var unimplementedBucketResourceNames = map[string]bool{ - "acl": true, - "lifecycle": true, - "policy": true, - "location": true, - "logging": true, - "notification": true, - "versions": true, - "requestPayment": true, - "versioning": true, - "website": true, - "uploads": true, -} - -var unimplementedObjectResourceNames = map[string]bool{ - "uploadId": true, - "acl": true, - "torrent": true, - "uploads": true, -} - -var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") - -// resourceForURL returns a resource object for the given URL. 
-func (srv *Server) resourceForURL(u *url.URL) (r resource) { - m := pathRegexp.FindStringSubmatch(u.Path) - if m == nil { - fatalf(404, "InvalidURI", "Couldn't parse the specified URI") - } - bucketName := m[2] - objectName := m[4] - if bucketName == "" { - return nullResource{} // root - } - b := bucketResource{ - name: bucketName, - bucket: srv.buckets[bucketName], - } - q := u.Query() - if objectName == "" { - for name := range q { - if unimplementedBucketResourceNames[name] { - return nullResource{} - } - } - return b - - } - if b.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - objr := objectResource{ - name: objectName, - version: q.Get("versionId"), - bucket: b.bucket, - } - for name := range q { - if unimplementedObjectResourceNames[name] { - return nullResource{} - } - } - if obj := objr.bucket.objects[objr.name]; obj != nil { - objr.object = obj - } - return objr -} - -// nullResource has error stubs for all resource methods. -type nullResource struct{} - -func notAllowed() interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -func (nullResource) put(a *action) interface{} { return notAllowed() } -func (nullResource) get(a *action) interface{} { return notAllowed() } -func (nullResource) post(a *action) interface{} { return notAllowed() } -func (nullResource) delete(a *action) interface{} { return notAllowed() } - -const timeFormat = "2006-01-02T15:04:05.000Z07:00" - -type bucketResource struct { - name string - bucket *bucket // non-nil if the bucket already exists. -} - -// GET on a bucket lists the objects in the bucket. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html -func (r bucketResource) get(a *action) interface{} { - if r.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - delimiter := a.req.Form.Get("delimiter") - marker := a.req.Form.Get("marker") - maxKeys := -1 - if s := a.req.Form.Get("max-keys"); s != "" { - i, err := strconv.Atoi(s) - if err != nil || i < 0 { - fatalf(400, "invalid value for max-keys: %q", s) - } - maxKeys = i - } - prefix := a.req.Form.Get("prefix") - a.w.Header().Set("Content-Type", "application/xml") - - if a.req.Method == "HEAD" { - return nil - } - - var objs orderedObjects - - // first get all matching objects and arrange them in alphabetical order. 
- for name, obj := range r.bucket.objects { - if strings.HasPrefix(name, prefix) { - objs = append(objs, obj) - } - } - sort.Sort(objs) - - if maxKeys <= 0 { - maxKeys = 1000 - } - resp := &s3.ListResp{ - Name: r.bucket.name, - Prefix: prefix, - Delimiter: delimiter, - Marker: marker, - MaxKeys: maxKeys, - } - - var prefixes []string - for _, obj := range objs { - if !strings.HasPrefix(obj.name, prefix) { - continue - } - name := obj.name - isPrefix := false - if delimiter != "" { - if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { - name = obj.name[:len(prefix)+i+len(delimiter)] - if prefixes != nil && prefixes[len(prefixes)-1] == name { - continue - } - isPrefix = true - } - } - if name <= marker { - continue - } - if len(resp.Contents)+len(prefixes) >= maxKeys { - resp.IsTruncated = true - break - } - if isPrefix { - prefixes = append(prefixes, name) - } else { - // Contents contains only keys not found in CommonPrefixes - resp.Contents = append(resp.Contents, obj.s3Key()) - } - } - resp.CommonPrefixes = prefixes - return resp -} - -// orderedObjects holds a slice of objects that can be sorted -// by name. -type orderedObjects []*object - -func (s orderedObjects) Len() int { - return len(s) -} -func (s orderedObjects) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedObjects) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (obj *object) s3Key() s3.Key { - return s3.Key{ - Key: obj.name, - LastModified: obj.mtime.Format(timeFormat), - Size: int64(len(obj.data)), - ETag: fmt.Sprintf(`"%x"`, obj.checksum), - // TODO StorageClass - // TODO Owner - } -} - -// DELETE on a bucket deletes the bucket if it's not empty. -func (r bucketResource) delete(a *action) interface{} { - b := r.bucket - if b == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - if len(b.objects) > 0 { - fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") - } - delete(a.srv.buckets, b.name) - return nil -} - -// PUT on a bucket creates the bucket. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html -func (r bucketResource) put(a *action) interface{} { - var created bool - if r.bucket == nil { - if !validBucketName(r.name) { - fatalf(400, "InvalidBucketName", "The specified bucket is not valid") - } - if loc := locationConstraint(a); loc == "" { - fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") - } - // TODO validate acl - r.bucket = &bucket{ - name: r.name, - // TODO default acl - objects: make(map[string]*object), - } - a.srv.buckets[r.name] = r.bucket - created = true - } - if !created && a.srv.config.send409Conflict() { - fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") - } - r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl")) - return nil -} - -func (bucketResource) post(a *action) interface{} { - fatalf(400, "Method", "bucket POST method not available") - return nil -} - -// validBucketName returns whether name is a valid bucket name. -// Here are the rules, from: -// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html -// -// Can contain lowercase letters, numbers, periods (.), underscores (_), -// and dashes (-). You can use uppercase letters for buckets only in the -// US Standard region. 
-// -// Must start with a number or letter -// -// Must be between 3 and 255 characters long -// -// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4) -// but the real S3 server does not seem to check that rule, so we will not -// check it either. -// -func validBucketName(name string) bool { - if len(name) < 3 || len(name) > 255 { - return false - } - r := name[0] - if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { - return false - } - for _, r := range name { - switch { - case r >= '0' && r <= '9': - case r >= 'a' && r <= 'z': - case r == '_' || r == '-': - case r == '.': - default: - return false - } - } - return true -} - -var responseParams = map[string]bool{ - "content-type": true, - "content-language": true, - "expires": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, -} - -type objectResource struct { - name string - version string - bucket *bucket // always non-nil. - object *object // may be nil. -} - -// GET on an object gets the contents of the object. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html -func (objr objectResource) get(a *action) interface{} { - obj := objr.object - if obj == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - h := a.w.Header() - // add metadata - for name, d := range obj.meta { - h[name] = d - } - // override header values in response to request parameters. - for name, vals := range a.req.Form { - if strings.HasPrefix(name, "response-") { - name = name[len("response-"):] - if !responseParams[name] { - continue - } - h.Set(name, vals[0]) - } - } - if r := a.req.Header.Get("Range"); r != "" { - fatalf(400, "NotImplemented", "range unimplemented") - } - // TODO Last-Modified-Since - // TODO If-Modified-Since - // TODO If-Unmodified-Since - // TODO If-Match - // TODO If-None-Match - // TODO Connection: close ?? - // TODO x-amz-request-id - h.Set("Content-Length", fmt.Sprint(len(obj.data))) - h.Set("ETag", hex.EncodeToString(obj.checksum)) - h.Set("Last-Modified", obj.mtime.Format(time.RFC1123)) - if a.req.Method == "HEAD" { - return nil - } - // TODO avoid holding the lock when writing data. - _, err := a.w.Write(obj.data) - if err != nil { - // we can't do much except just log the fact. - log.Printf("error writing data: %v", err) - } - return nil -} - -var metaHeaders = map[string]bool{ - "Content-MD5": true, - "x-amz-acl": true, - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, -} - -// PUT on an object creates the object. -func (objr objectResource) put(a *action) interface{} { - // TODO Cache-Control header - // TODO Expires header - // TODO x-amz-server-side-encryption - // TODO x-amz-storage-class - - // TODO is this correct, or should we erase all previous metadata? - obj := objr.object - if obj == nil { - obj = &object{ - name: objr.name, - meta: make(http.Header), - } - } - - var expectHash []byte - if c := a.req.Header.Get("Content-MD5"); c != "" { - var err error - expectHash, err = hex.DecodeString(c) - if err != nil || len(expectHash) != md5.Size { - fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid") - } - } - sum := md5.New() - // TODO avoid holding lock while reading data. 
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) - if err != nil { - fatalf(400, "TODO", "read error") - } - gotHash := sum.Sum(nil) - if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { - fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") - } - if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { - fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") - } - - // PUT request has been successful - save data and metadata - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { - obj.meta[key] = values - } - } - obj.data = data - obj.checksum = gotHash - obj.mtime = time.Now() - objr.bucket.objects[objr.name] = obj - return nil -} - -func (objr objectResource) delete(a *action) interface{} { - delete(objr.bucket.objects, objr.name) - return nil -} - -func (objr objectResource) post(a *action) interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -type CreateBucketConfiguration struct { - LocationConstraint string -} - -// locationConstraint parses the request body (if present). -// If there is no body, an empty string will be returned. -func locationConstraint(a *action) string { - var body bytes.Buffer - if _, err := io.Copy(&body, a.req.Body); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - if body.Len() == 0 { - return "" - } - var loc CreateBucketConfiguration - if err := xml.NewDecoder(&body).Decode(&loc); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - return loc.LocationConstraint -} diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 2f5d2d786..0c8795bc5 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -3,13 +3,12 @@ package s3 import ( "bytes" "errors" - "fmt" "io" "io/ioutil" + "os" "strings" - "gopkg.in/amz.v3/aws" - "gopkg.in/amz.v3/s3" + "github.com/minio/minio-go" "github.com/restic/restic/backend" ) @@ -26,41 +25,47 @@ func s3path(t backend.Type, name string) string { } type S3Backend struct { - bucket *s3.Bucket - connChan chan struct{} - path string + s3api minio.API + connChan chan struct{} + bucketname string } -// Open a backend using an S3 bucket object -func OpenS3Bucket(bucket *s3.Bucket, bucketname string) *S3Backend { +// Open opens the S3 backend at bucket and region. +func Open(regionname, bucketname string) (backend.Backend, error) { + config := minio.Config{ + AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"), + SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"), + } + + if !strings.Contains(regionname, ".") { + // Amazon region name + switch regionname { + case "us-east-1": + config.Endpoint = "https://s3.amazonaws.com" + default: + config.Endpoint = "https://s3-" + regionname + ".amazonaws.com" + } + } else { + // S3 compatible endpoint + config.Endpoint = "https://" + regionname + } + + s3api, s3err := minio.New(config) + if s3err != nil { + return nil, s3err + } + connChan := make(chan struct{}, connLimit) for i := 0; i < connLimit; i++ { connChan <- struct{}{} } - return &S3Backend{bucket: bucket, path: bucketname, connChan: connChan} -} - -// Open opens the S3 backend at bucket and region. 
-func Open(regionname, bucketname string) (backend.Backend, error) { - auth, err := aws.EnvAuth() - if err != nil { - return nil, err - } - - client := s3.New(auth, aws.Regions[regionname]) - - s3bucket, s3err := client.Bucket(bucketname) - if s3err != nil { - return nil, s3err - } - - return OpenS3Bucket(s3bucket, bucketname), nil + return &S3Backend{s3api: s3api, bucketname: bucketname, connChan: connChan}, nil } // Location returns this backend's location (the bucket name). func (be *S3Backend) Location() string { - return be.path + return be.bucketname } type s3Blob struct { @@ -102,13 +107,13 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { path := s3path(t, name) // Check key does not already exist - _, err := bb.b.bucket.GetReader(path) + _, err := bb.b.s3api.StatObject(bb.b.bucketname, path) if err == nil { return errors.New("key already exists!") } <-bb.b.connChan - err = bb.b.bucket.PutReader(path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream", "private") + err = bb.b.s3api.PutObject(bb.b.bucketname, path, "application/octet-stream", int64(bb.buf.Len()), bb.buf) bb.b.connChan <- struct{}{} bb.buf.Reset() return err @@ -129,36 +134,25 @@ func (be *S3Backend) Create() (backend.Blob, error) { // name. The reader should be closed after draining it. func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { path := s3path(t, name) - return be.bucket.GetReader(path) + r, _, err := be.s3api.GetObject(be.bucketname, path) + rc := ioutil.NopCloser(r) + return rc, err } // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - rc, err := be.Get(t, name) - if err != nil { - return nil, err - } - - n, errc := io.CopyN(ioutil.Discard, rc, int64(offset)) - if errc != nil { - return nil, errc - } else if n != int64(offset) { - return nil, fmt.Errorf("less bytes read than expected, read: %d, expected: %d", n, offset) - } - - if length == 0 { - return rc, nil - } - - return backend.LimitReadCloser(rc, int64(length)), nil + path := s3path(t, name) + r, _, err := be.s3api.GetPartialObject(be.bucketname, path, int64(offset), int64(length)) + rc := ioutil.NopCloser(r) + return rc, err } // Test returns true if a blob of the given type and name exists in the backend. func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { found := false path := s3path(t, name) - _, err := be.bucket.GetReader(path) + _, err := be.s3api.StatObject(be.bucketname, path) if err == nil { found = true } @@ -170,7 +164,7 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { // Remove removes the blob with the given name and type. func (be *S3Backend) Remove(t backend.Type, name string) error { path := s3path(t, name) - return be.bucket.Del(path) + return be.s3api.RemoveObject(be.bucketname, path) } // List returns a channel that yields all names of blobs of type t. A @@ -181,34 +175,12 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { prefix := s3path(t, "") - listresp, err := be.bucket.List(prefix, "/", "", maxKeysInList) - - if err != nil { - close(ch) - return ch - } - - matches := make([]string, len(listresp.Contents)) - for idx, key := range listresp.Contents { - matches[idx] = strings.TrimPrefix(key.Key, prefix) - } - - // Continue making requests to get full list. 
- for listresp.IsTruncated { - listresp, err = be.bucket.List(prefix, "/", listresp.NextMarker, maxKeysInList) - if err != nil { - close(ch) - return ch - } - - for _, key := range listresp.Contents { - matches = append(matches, strings.TrimPrefix(key.Key, prefix)) - } - } + listresp := be.s3api.ListObjects(be.bucketname, prefix, true) go func() { defer close(ch) - for _, m := range matches { + for obj := range listresp { + m := strings.TrimPrefix(obj.Stat.Key, prefix) if m == "" { continue } diff --git a/backend/s3_test.go b/backend/s3_test.go index afaa5f372..2122b3a64 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -3,48 +3,12 @@ package backend_test import ( "testing" - "gopkg.in/amz.v3/aws" - "gopkg.in/amz.v3/s3" - "gopkg.in/amz.v3/s3/s3test" - bes3 "github.com/restic/restic/backend/s3" . "github.com/restic/restic/test" ) -type LocalServer struct { - auth aws.Auth - region aws.Region - srv *s3test.Server - config *s3test.Config -} - -var s LocalServer - func setupS3Backend(t *testing.T) *bes3.S3Backend { - s.config = &s3test.Config{ - Send409Conflict: true, - } - srv, err := s3test.NewServer(s.config) - OK(t, err) - s.srv = srv - - s.region = aws.Region{ - Name: "faux-region-1", - S3Endpoint: srv.URL(), - S3LocationConstraint: true, // s3test server requires a LocationConstraint - } - - s.auth = aws.Auth{"abc", "123"} - - service := s3.New(s.auth, s.region) - bucket, berr := service.Bucket("testbucket") - OK(t, berr) - err = bucket.PutBucket("private") - OK(t, err) - - t.Logf("created s3 backend locally") - - return bes3.OpenS3Bucket(bucket, "testbucket") + return bes3.Open("play.minio.io:9000", "restictestbucket") } func TestS3Backend(t *testing.T) { From ed2a4ba1d5cb50ef39580fd1592d6b6cde9b3cd6 Mon Sep 17 00:00:00 2001 From: Chris Howey Date: Fri, 6 Nov 2015 16:00:10 -0600 Subject: [PATCH 02/55] Fix s3 backend test --- backend/s3_test.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/backend/s3_test.go b/backend/s3_test.go index 2122b3a64..cf1aeaa1c 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -4,15 +4,14 @@ import ( "testing" bes3 "github.com/restic/restic/backend/s3" - . "github.com/restic/restic/test" ) -func setupS3Backend(t *testing.T) *bes3.S3Backend { - return bes3.Open("play.minio.io:9000", "restictestbucket") -} - func TestS3Backend(t *testing.T) { - s := setupS3Backend(t) + s, err := bes3.Open("play.minio.io:9000", "restictestbucket") + + if err != nil { + t.Fatal(err) + } testBackend(s, t) } From e2445f4c97ea70231f1f0f33536166f3c8293efa Mon Sep 17 00:00:00 2001 From: Chris Howey Date: Fri, 6 Nov 2015 17:46:59 -0600 Subject: [PATCH 03/55] GetPartialObject does not work. --- backend/s3/s3.go | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 0c8795bc5..b5b7895fb 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "errors" + "fmt" "io" "io/ioutil" "os" @@ -113,7 +114,7 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { } <-bb.b.connChan - err = bb.b.s3api.PutObject(bb.b.bucketname, path, "application/octet-stream", int64(bb.buf.Len()), bb.buf) + err = bb.b.s3api.PutObject(bb.b.bucketname, path, "binary/octet-stream", int64(bb.buf.Len()), bb.buf) bb.b.connChan <- struct{}{} bb.buf.Reset() return err @@ -134,18 +135,34 @@ func (be *S3Backend) Create() (backend.Blob, error) { // name. The reader should be closed after draining it. 
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { path := s3path(t, name) - r, _, err := be.s3api.GetObject(be.bucketname, path) - rc := ioutil.NopCloser(r) + rc, _, err := be.s3api.GetObject(be.bucketname, path) return rc, err } // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - path := s3path(t, name) - r, _, err := be.s3api.GetPartialObject(be.bucketname, path, int64(offset), int64(length)) - rc := ioutil.NopCloser(r) - return rc, err + rc, err := be.Get(t, name) + if err != nil { + return nil, err + + } + + n, errc := io.CopyN(ioutil.Discard, rc, int64(offset)) + if errc != nil { + return nil, errc + + } else if n != int64(offset) { + return nil, fmt.Errorf("less bytes read than expected, read: %d, expected: %d", n, offset) + + } + + if length == 0 { + return rc, nil + + } + + return backend.LimitReadCloser(rc, int64(length)), nil } // Test returns true if a blob of the given type and name exists in the backend. From 69a9adc4c3575a8108002fc08601ffa7af7aa6b1 Mon Sep 17 00:00:00 2001 From: Chris Howey Date: Sat, 7 Nov 2015 06:43:15 -0600 Subject: [PATCH 04/55] Use local instance of minio server. Need to figure out how to have tests automatically start and kill server. --- Godeps/Godeps.json | 3 +-- backend/s3/s3.go | 32 ++++++++------------------------ backend/s3_test.go | 21 +++++++++++++++++++-- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 01bcb29b5..b818dc01e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,7 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-58-g5c3a491", - "Rev": "5c3a4919116141f088990bd6ee385877648c7a25" + "Rev": "61f6570da0edd761974216c9ed5da485d3cc0c99" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/backend/s3/s3.go b/backend/s3/s3.go index b5b7895fb..35c44a55a 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -3,9 +3,7 @@ package s3 import ( "bytes" "errors" - "fmt" "io" - "io/ioutil" "os" "strings" @@ -48,7 +46,11 @@ func Open(regionname, bucketname string) (backend.Backend, error) { } } else { // S3 compatible endpoint - config.Endpoint = "https://" + regionname + if strings.Contains(regionname, "localhost") || strings.Contains(regionname, "127.0.0.1") { + config.Endpoint = "http://" + regionname + } else { + config.Endpoint = "https://" + regionname + } } s3api, s3err := minio.New(config) @@ -142,27 +144,9 @@ func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. 
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - rc, err := be.Get(t, name) - if err != nil { - return nil, err - - } - - n, errc := io.CopyN(ioutil.Discard, rc, int64(offset)) - if errc != nil { - return nil, errc - - } else if n != int64(offset) { - return nil, fmt.Errorf("less bytes read than expected, read: %d, expected: %d", n, offset) - - } - - if length == 0 { - return rc, nil - - } - - return backend.LimitReadCloser(rc, int64(length)), nil + path := s3path(t, name) + rc, _, err := be.s3api.GetPartialObject(be.bucketname, path, int64(offset), int64(length)) + return rc, err } // Test returns true if a blob of the given type and name exists in the backend. diff --git a/backend/s3_test.go b/backend/s3_test.go index cf1aeaa1c..dbcfb3c4b 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -1,17 +1,34 @@ package backend_test import ( + "os" "testing" + "github.com/minio/minio-go" bes3 "github.com/restic/restic/backend/s3" + . "github.com/restic/restic/test" ) func TestS3Backend(t *testing.T) { - s, err := bes3.Open("play.minio.io:9000", "restictestbucket") - + config := minio.Config{ + AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"), + SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"), + Endpoint: "http://localhost:9000", + } + s3Client, err := minio.New(config) if err != nil { t.Fatal(err) } + bucketname := "restictestbucket" + + err = s3Client.MakeBucket(bucketname, "") + if err != nil { + t.Fatal(err) + } + + s, err := bes3.Open("127.0.0.1:9000", bucketname) + OK(t, err) + testBackend(s, t) } From d0ca118387cabe75b56c8199a8df78fa1e8ea85e Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 6 Dec 2015 23:13:22 +0100 Subject: [PATCH 05/55] Fix usage of the `done` chan --- backend/s3/s3.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 35c44a55a..f2bbff50b 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -203,7 +203,7 @@ func (be *S3Backend) removeKeys(t backend.Type) { for key := range be.List(backend.Data, doneChan) { be.Remove(backend.Data, key) } - doneChan <- struct{}{} + close(doneChan) } // Delete removes all restic keys From 55f10eb1c186dfc6206bf04d724635e6d2a2fdc1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 6 Dec 2015 23:21:48 +0100 Subject: [PATCH 06/55] Fix s3 test with local minio server instance --- backend/s3/s3.go | 109 ++++++++++++++++++++++++++++++++------------- backend/s3_test.go | 32 +++++-------- 2 files changed, 88 insertions(+), 53 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index f2bbff50b..1fb624633 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -29,41 +29,70 @@ type S3Backend struct { bucketname string } -// Open opens the S3 backend at bucket and region. 
-func Open(regionname, bucketname string) (backend.Backend, error) { +func getConfig(region, bucket string) minio.Config { config := minio.Config{ AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"), SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"), + Region: "us-east-1", } - if !strings.Contains(regionname, ".") { + if !strings.Contains(region, ".") { // Amazon region name - switch regionname { + switch region { case "us-east-1": config.Endpoint = "https://s3.amazonaws.com" default: - config.Endpoint = "https://s3-" + regionname + ".amazonaws.com" + config.Endpoint = "https://s3-" + region + ".amazonaws.com" + config.Region = region } } else { - // S3 compatible endpoint - if strings.Contains(regionname, "localhost") || strings.Contains(regionname, "127.0.0.1") { - config.Endpoint = "http://" + regionname + // S3 compatible endpoint, use default region "us-east-1" + if strings.Contains(region, "localhost") || strings.Contains(region, "127.0.0.1") { + config.Endpoint = "http://" + region } else { - config.Endpoint = "https://" + regionname + config.Endpoint = "https://" + region } } - s3api, s3err := minio.New(config) - if s3err != nil { - return nil, s3err + return config +} + +// Open opens the S3 backend at bucket and region. +func Open(regionname, bucketname string) (backend.Backend, error) { + s3api, err := minio.New(getConfig(regionname, bucketname)) + if err != nil { + return nil, err } - connChan := make(chan struct{}, connLimit) + be := &S3Backend{s3api: s3api, bucketname: bucketname} + be.createConnections() + + return be, nil +} + +// Create creates a new bucket in the given region and opens the backend. +func Create(regionname, bucketname string) (backend.Backend, error) { + s3api, err := minio.New(getConfig(regionname, bucketname)) + if err != nil { + return nil, err + } + + be := &S3Backend{s3api: s3api, bucketname: bucketname} + be.createConnections() + + err = s3api.MakeBucket(bucketname, "") + if err != nil { + return nil, err + } + + return be, nil +} + +func (be *S3Backend) createConnections() { + be.connChan = make(chan struct{}, connLimit) for i := 0; i < connLimit; i++ { - connChan <- struct{}{} + be.connChan <- struct{}{} } - - return &S3Backend{s3api: s3api, bucketname: bucketname, connChan: connChan}, nil } // Location returns this backend's location (the bucket name). @@ -112,7 +141,7 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { // Check key does not already exist _, err := bb.b.s3api.StatObject(bb.b.bucketname, path) if err == nil { - return errors.New("key already exists!") + return errors.New("key already exists") } <-bb.b.connChan @@ -197,24 +226,42 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { return ch } -// Remove keys for a specified backend type -func (be *S3Backend) removeKeys(t backend.Type) { - doneChan := make(chan struct{}) - for key := range be.List(backend.Data, doneChan) { - be.Remove(backend.Data, key) +// Remove keys for a specified backend type. +func (be *S3Backend) removeKeys(t backend.Type) error { + done := make(chan struct{}) + defer close(done) + for key := range be.List(backend.Data, done) { + err := be.Remove(backend.Data, key) + if err != nil { + return err + } } - close(doneChan) + + return nil } -// Delete removes all restic keys +// Delete removes all restic keys and the bucket. 
 func (be *S3Backend) Delete() error {
-	be.removeKeys(backend.Data)
-	be.removeKeys(backend.Key)
-	be.removeKeys(backend.Lock)
-	be.removeKeys(backend.Snapshot)
-	be.removeKeys(backend.Index)
-	be.removeKeys(backend.Config)
-	return nil
+	alltypes := []backend.Type{
+		backend.Data,
+		backend.Key,
+		backend.Lock,
+		backend.Snapshot,
+		backend.Index}
+
+	for _, t := range alltypes {
+		err := be.removeKeys(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	err := be.Remove(backend.Config, "")
+	if err != nil {
+		return err
+	}
+
+	return be.s3api.RemoveBucket(be.bucketname)
 }
 
 // Close does nothing
diff --git a/backend/s3_test.go b/backend/s3_test.go
index dbcfb3c4b..4826769b2 100644
--- a/backend/s3_test.go
+++ b/backend/s3_test.go
@@ -1,34 +1,22 @@
 package backend_test
 
 import (
-	"os"
 	"testing"
 
-	"github.com/minio/minio-go"
-	bes3 "github.com/restic/restic/backend/s3"
+	"github.com/restic/restic/backend/s3"
 
 	. "github.com/restic/restic/test"
 )
 
+type deleter interface {
+	Delete() error
+}
+
 func TestS3Backend(t *testing.T) {
-	config := minio.Config{
-		AccessKeyID:     os.Getenv("AWS_ACCESS_KEY_ID"),
-		SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
-		Endpoint:        "http://localhost:9000",
-	}
-	s3Client, err := minio.New(config)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bucketname := "restictestbucket"
-
-	err = s3Client.MakeBucket(bucketname, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	s, err := bes3.Open("127.0.0.1:9000", bucketname)
-	OK(t, err)
-
-	testBackend(s, t)
+	be, err := s3.Create("127.0.0.1:9000", "restictestbucket")
+	OK(t, err)
+
+	testBackend(be, t)
+
+	del := be.(deleter)
+	OK(t, del.Delete())
 }

From 248f991ad463e5f37c3657cec3c89845f4bdcd30 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sat, 19 Dec 2015 13:23:05 +0100
Subject: [PATCH 07/55] s3: don't remove the bucket on Delete()

---
 backend/s3/s3.go | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 1fb624633..4687de387 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -240,7 +240,7 @@ func (be *S3Backend) removeKeys(t backend.Type) error {
 	return nil
 }
 
-// Delete removes all restic keys and the bucket.
+// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *S3Backend) Delete() error {
 	alltypes := []backend.Type{
 		backend.Data,
@@ -256,12 +256,7 @@ func (be *S3Backend) Delete() error {
 		}
 	}
 
-	err := be.Remove(backend.Config, "")
-	if err != nil {
-		return err
-	}
-
-	return be.s3api.RemoveBucket(be.bucketname)
+	return be.Remove(backend.Config, "")
 }
 
 // Close does nothing

From 5736742c3ea40bef93e9c484b49f9d249210ea7b Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sat, 19 Dec 2015 13:26:42 +0100
Subject: [PATCH 08/55] s3: Open() creates bucket if it does not exist

---
 backend/s3/s3.go   | 15 +--------------
 backend/s3_test.go |  2 +-
 2 files changed, 2 insertions(+), 15 deletions(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 4687de387..f98942a8e 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -57,7 +57,7 @@ func getConfig(region, bucket string) minio.Config {
 	return config
 }
 
-// Open opens the S3 backend at bucket and region.
+// Open opens the S3 backend at bucket and region. The bucket is created if it does not exist yet.
 func Open(regionname, bucketname string) (backend.Backend, error) {
 	s3api, err := minio.New(getConfig(regionname, bucketname))
 	if err != nil {
@@ -67,19 +67,6 @@ func Open(regionname, bucketname string) (backend.Backend, error) {
 	be := &S3Backend{s3api: s3api, bucketname: bucketname}
 	be.createConnections()
 
-	return be, nil
-}
-
-// Create creates a new bucket in the given region and opens the backend.
-func Create(regionname, bucketname string) (backend.Backend, error) {
-	s3api, err := minio.New(getConfig(regionname, bucketname))
-	if err != nil {
-		return nil, err
-	}
-
-	be := &S3Backend{s3api: s3api, bucketname: bucketname}
-	be.createConnections()
-
 	err = s3api.MakeBucket(bucketname, "")
 	if err != nil {
 		return nil, err
diff --git a/backend/s3_test.go b/backend/s3_test.go
index 4826769b2..0170f126e 100644
--- a/backend/s3_test.go
+++ b/backend/s3_test.go
@@ -12,7 +12,7 @@ type deleter interface {
 }
 
 func TestS3Backend(t *testing.T) {
-	be, err := s3.Create("127.0.0.1:9000", "restictestbucket")
+	be, err := s3.Open("127.0.0.1:9000", "restictestbucket")
 	OK(t, err)
 
 	testBackend(be, t)

From c22c0f27069f10fd87edeefd18ec4231abdd881b Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sat, 19 Dec 2015 15:57:22 +0100
Subject: [PATCH 09/55] Add Dockerfile that resembles the Travis environment

---
 Dockerfile | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 Dockerfile

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..fe76898b5
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,55 @@
+# This Dockerfile configures a container that is similar to the Travis CI
+# environment and can be used to run tests locally.
+#
+# build the image:
+#   docker build -t restic/test .
+#
+# run tests:
+#   docker run --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test go run run_integration_tests.go
+#
+# run interactively with:
+#   docker run --interactive --tty --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test /bin/bash
+#
+# run tests for a single package:
+#   docker run --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test go test -v ./backend

+FROM ubuntu:14.04
+
+ARG GOVERSION=1.5.2
+ARG GOARCH=amd64
+
+# install dependencies
+RUN apt-get update
+RUN apt-get install -y --no-install-recommends ca-certificates wget git
+
+# download and install Go
+RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz
+RUN cd /var/lib && tar xf /tmp/go.tar.gz
+RUN rm -f /tmp/go.tar.gz
+
+ENV GOROOT /var/lib/go
+ENV PATH $PATH:$GOROOT/bin
+
+# add and configure user
+ENV HOME /home/travis
+ENV GOPATH $HOME/gopath
+ENV PATH $PATH:$GOPATH/bin
+RUN useradd -m -d $HOME -s /bin/bash travis
+
+# run everything below as user travis
+USER travis
+WORKDIR $HOME
+
+# make gopath
+RUN mkdir -p $GOPATH/src/github.com/restic/restic
+
+# install tools
+RUN go get golang.org/x/tools/cmd/cover
+RUN go get github.com/mattn/goveralls
+RUN go get github.com/mitchellh/gox
+
+# set TRAVIS_BUILD_DIR for integration script
+ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic
+ENV GOPATH $GOPATH:${TRAVIS_BUILD_DIR}/Godeps/_workspace
+
+WORKDIR $TRAVIS_BUILD_DIR

From fa7192fdfb4ad13f0e1752ca40df7c90e93fb6df Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sat, 19 Dec 2015 16:00:23 +0100
Subject: [PATCH 10/55] CI: save cross-compiled binaries in /tmp

---
 run_integration_tests.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/run_integration_tests.go
b/run_integration_tests.go index ed0618807..d8d03bb14 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -72,6 +72,7 @@ func (env *TravisEnvironment) RunTests() { "-os", strings.Join(env.goxOS, " "), "-arch", strings.Join(env.goxArch, " "), "-tags", tags, + "-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}", "./cmd/restic") } From 8562a1bb2f88f9ae0bfb177548912cef63d00a7d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 19 Dec 2015 17:21:35 +0100 Subject: [PATCH 11/55] Dockerfile: Also install minio --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index fe76898b5..dc70958d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,7 +20,7 @@ ARG GOARCH=amd64 # install dependencies RUN apt-get update -RUN apt-get install -y --no-install-recommends ca-certificates wget git +RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential # download and install Go RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz @@ -47,6 +47,7 @@ RUN mkdir -p $GOPATH/src/github.com/restic/restic RUN go get golang.org/x/tools/cmd/cover RUN go get github.com/mattn/goveralls RUN go get github.com/mitchellh/gox +RUN GO15VENDOREXPERIMENT=1 go get github.com/minio/minio # set TRAVIS_BUILD_DIR for integration script ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic From edfb31f4fed4805ffbaf0d2a4d03c1f02199ec72 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 19 Dec 2015 17:21:45 +0100 Subject: [PATCH 12/55] s3: Run integration test with minio server --- backend/s3_test.go | 6 ++- run_integration_tests.go | 113 ++++++++++++++++++++++++++++++++++++++- test/backend.go | 1 + 3 files changed, 117 insertions(+), 3 deletions(-) diff --git a/backend/s3_test.go b/backend/s3_test.go index 0170f126e..611221085 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -12,7 +12,11 @@ type deleter interface { } func TestS3Backend(t *testing.T) { - be, err := s3.Open("127.0.0.1:9000", "restictestbucket") + if TestS3Server == "" { + t.Skip("s3 test server not available") + } + + be, err := s3.Open(TestS3Server, "restictestbucket") OK(t, err) testBackend(be, t) diff --git a/run_integration_tests.go b/run_integration_tests.go index d8d03bb14..cbf2e2468 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -4,6 +4,7 @@ package main import ( "fmt" + "io/ioutil" "os" "os/exec" "path/filepath" @@ -21,6 +22,10 @@ type TravisEnvironment struct { goxOS []string } +var envVendorExperiment = map[string]string{ + "GO15VENDOREXPERIMENT": "1", +} + func (env *TravisEnvironment) Prepare() { msg("preparing environment for Travis CI\n") @@ -28,6 +33,7 @@ func (env *TravisEnvironment) Prepare() { run("go", "get", "github.com/mattn/goveralls") run("go", "get", "github.com/pierrre/gotestcover") run("go", "get", "github.com/mitchellh/gox") + runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio") if runtime.GOOS == "darwin" { // install the libraries necessary for fuse @@ -79,8 +85,20 @@ func (env *TravisEnvironment) RunTests() { // run the build script run("go", "run", "build.go") - // run tests and gather coverage information - run("gotestcover", "-coverprofile", "all.cov", "./...") + minioCmd, err := runMinio() + if err != nil { + fmt.Fprintf(os.Stderr, "error running minio server: %v", err) + os.Exit(4) + } + + // run the tests and gather coverage information + runWithEnv(minioEnv, "gotestcover", "-coverprofile", "all.cov", "./...") + + err = 
minioCmd.Process.Kill() + if err != nil { + fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) + os.Exit(4) + } runGofmt() } @@ -89,6 +107,7 @@ type AppveyorEnvironment struct{} func (env *AppveyorEnvironment) Prepare() { msg("preparing environment for Appveyor CI\n") + runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio") } func (env *AppveyorEnvironment) RunTests() { @@ -121,6 +140,26 @@ func msg(format string, args ...interface{}) { fmt.Printf("CI: "+format, args...) } +func updateEnv(env []string, override map[string]string) []string { + var newEnv []string + for _, s := range env { + d := strings.SplitN(s, "=", 2) + key := d[0] + + if _, ok := override[key]; ok { + continue + } + + newEnv = append(newEnv, s) + } + + for k, v := range override { + newEnv = append(newEnv, k+"="+v) + } + + return newEnv +} + func runGofmt() { dir, err := os.Getwd() if err != nil { @@ -155,9 +194,19 @@ func runGofmt() { func run(command string, args ...string) { msg("run %v %v\n", command, strings.Join(args, " ")) + runWithEnv(nil, command, args...) +} + +// runWithEnv calls a command with the current environment, with the entries +// of the env map set additionally, overriding entries with the same key. +func runWithEnv(env map[string]string, command string, args ...string) { + msg("runWithEnv %v %v\n", command, strings.Join(args, " ")) cmd := exec.Command(command, args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr + if env != nil { + cmd.Env = updateEnv(os.Environ(), env) + } err := cmd.Run() if err != nil { @@ -167,6 +216,66 @@ func run(command string, args ...string) { } } +var minioConfig = ` +{ + "version": "2", + "credentials": { + "accessKeyId": "KEBIYDZ87HCIH5D17YCN", + "secretAccessKey": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe" + } +} +` + +var minioEnv = map[string]string{ + "RESTIC_TEST_S3_SERVER": "127.0.0.1:9000", + "AWS_ACCESS_KEY_ID": "KEBIYDZ87HCIH5D17YCN", + "AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe", +} + +// runMinio prepares and runs a minio server for the s3 backend tests in a // temporary directory.
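+// +// A usage sketch, mirroring RunTests above (the caller is responsible for +// stopping the server again): +// minioCmd, err := runMinio() +// runWithEnv(minioEnv, "gotestcover", ...) +// minioCmd.Process.Kill()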
+func runMinio() (*exec.Cmd, error) { + cfgdir, err := ioutil.TempDir("", "minio-config-") + if err != nil { + return nil, err + } + + cfg, err := os.Create(filepath.Join(cfgdir, "config.json")) + if err != nil { + return nil, err + } + + _, err = cfg.Write([]byte(minioConfig)) + if err != nil { + return nil, err + } + + err = cfg.Close() + if err != nil { + return nil, err + } + + dir, err := ioutil.TempDir("", "minio-root") + if err != nil { + return nil, err + } + + logfile, err := os.Create(filepath.Join(cfgdir, "output")) + if err != nil { + return nil, err + } + + cmd := exec.Command("minio", "--config-folder", cfgdir, "server", dir) + cmd.Stdout = logfile + cmd.Stderr = logfile + err = cmd.Start() + if err != nil { + return nil, err + } + + return cmd, nil +} + func isTravis() bool { return os.Getenv("TRAVIS_BUILD_DIR") != "" } diff --git a/test/backend.go b/test/backend.go index 8c0778abf..8dea18578 100644 --- a/test/backend.go +++ b/test/backend.go @@ -22,6 +22,7 @@ var ( TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", "/usr/lib/ssh:/usr/lib/openssh") TestWalkerPath = getStringVar("RESTIC_TEST_PATH", ".") BenchArchiveDirectory = getStringVar("RESTIC_BENCH_DIR", ".") + TestS3Server = getStringVar("RESTIC_TEST_S3_SERVER", "") ) func getStringVar(name, defaultValue string) string { From 3e422c87768094d8dae6e1cfc43be037f8bf70a6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 19 Dec 2015 17:36:48 +0100 Subject: [PATCH 13/55] Add debug output, listen on localhost --- run_integration_tests.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index cbf2e2468..d3cef5b4d 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -235,6 +235,7 @@ var minioEnv = map[string]string{ // runMinio prepares and runs a minio server for the s3 backend tests in a // temporary directory. 
func runMinio() (*exec.Cmd, error) { + msg("running minio server\n") cfgdir, err := ioutil.TempDir("", "minio-config-") if err != nil { return nil, err @@ -265,7 +266,10 @@ func runMinio() (*exec.Cmd, error) { return nil, err } - cmd := exec.Command("minio", "--config-folder", cfgdir, "server", dir) + cmd := exec.Command("minio", + "--config-folder", cfgdir, + "--address", "127.0.0.1:9000", + "server", dir) cmd.Stdout = logfile cmd.Stderr = logfile err = cmd.Start() From 34e8f63f775050115cd8adb50cfbb36eb38b8dd3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 19 Dec 2015 17:52:17 +0100 Subject: [PATCH 14/55] Increase debug output for minio server --- run_integration_tests.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index d3cef5b4d..e1bdbc736 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -88,7 +88,7 @@ func (env *TravisEnvironment) RunTests() { minioCmd, err := runMinio() if err != nil { fmt.Fprintf(os.Stderr, "error running minio server: %v", err) - os.Exit(4) + os.Exit(8) } // run the tests and gather coverage information @@ -97,7 +97,7 @@ func (env *TravisEnvironment) RunTests() { err = minioCmd.Process.Kill() if err != nil { fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) - os.Exit(4) + os.Exit(8) } runGofmt() @@ -261,17 +261,19 @@ func runMinio() (*exec.Cmd, error) { return nil, err } - logfile, err := os.Create(filepath.Join(cfgdir, "output")) - if err != nil { - return nil, err - } + // logfile, err := os.Create(filepath.Join(cfgdir, "output")) + // if err != nil { + // return nil, err + // } cmd := exec.Command("minio", "--config-folder", cfgdir, "--address", "127.0.0.1:9000", "server", dir) - cmd.Stdout = logfile - cmd.Stderr = logfile + // cmd.Stdout = logfile + // cmd.Stderr = logfile + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr err = cmd.Start() if err != nil { return nil, err From d5e36bd2f007687f2cb90988dffe483411959bcd Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 19 Dec 2015 18:22:57 +0100 Subject: [PATCH 15/55] Only run minio server for Go >= 1.5.1 --- run_integration_tests.go | 59 ++++++++++++++++++++++++++++------------ 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index e1bdbc736..f936f02c4 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strings" ) @@ -65,6 +66,20 @@ func (env *TravisEnvironment) Prepare() { } } +func goVersionAtLeast151() bool { + v := runtime.Version() + + if match, _ := regexp.MatchString(`^go1\.[0-4]`, v); match { + return false + } + + if v == "go1.5" { + return false + } + + return true +} + func (env *TravisEnvironment) RunTests() { // run fuse tests on darwin if runtime.GOOS != "darwin" { @@ -85,19 +100,31 @@ func (env *TravisEnvironment) RunTests() { // run the build script run("go", "run", "build.go") - minioCmd, err := runMinio() - if err != nil { - fmt.Fprintf(os.Stderr, "error running minio server: %v", err) - os.Exit(8) + var ( + testEnv map[string]string + minioCmd *exec.Cmd + err error + ) + + if goVersionAtLeast151() { + minioCmd, err = runMinio() + if err != nil { + fmt.Fprintf(os.Stderr, "error running minio server: %v", err) + os.Exit(8) + } + + testEnv = minioEnv } // run the tests and gather coverage information - runWithEnv(minioEnv, "gotestcover", "-coverprofile", "all.cov", "./...") + 
runWithEnv(testEnv, "gotestcover", "-coverprofile", "all.cov", "./...") - err = minioCmd.Process.Kill() - if err != nil { - fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) - os.Exit(8) + if minioCmd != nil { + err := minioCmd.Process.Kill() + if err != nil { + fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) + os.Exit(8) + } } runGofmt() @@ -261,19 +288,17 @@ func runMinio() (*exec.Cmd, error) { return nil, err } - // logfile, err := os.Create(filepath.Join(cfgdir, "output")) - // if err != nil { - // return nil, err - // } + logfile, err := os.Create(filepath.Join(cfgdir, "output")) + if err != nil { + return nil, err + } cmd := exec.Command("minio", "--config-folder", cfgdir, "--address", "127.0.0.1:9000", "server", dir) - // cmd.Stdout = logfile - // cmd.Stderr = logfile - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr + cmd.Stdout = logfile + cmd.Stderr = logfile err = cmd.Start() if err != nil { return nil, err From e96f28c536cb6a4713cffd63998f4b33f043dd47 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 20 Dec 2015 18:09:35 +0100 Subject: [PATCH 16/55] Output stderr when minio server failed --- run_integration_tests.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index f936f02c4..349477985 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -3,7 +3,9 @@ package main import ( + "bytes" "fmt" + "io" "io/ioutil" "os" "os/exec" @@ -288,22 +290,28 @@ func runMinio() (*exec.Cmd, error) { return nil, err } - logfile, err := os.Create(filepath.Join(cfgdir, "output")) - if err != nil { - return nil, err - } + out := bytes.NewBuffer(nil) cmd := exec.Command("minio", "--config-folder", cfgdir, "--address", "127.0.0.1:9000", "server", dir) - cmd.Stdout = logfile - cmd.Stderr = logfile + cmd.Stdout = out + cmd.Stderr = out err = cmd.Start() if err != nil { return nil, err } + go func() { + err := cmd.Wait() + if err != nil { + fmt.Fprintf(os.Stderr, "error running minio server: %v, output:\n", err) + io.Copy(os.Stderr, out) + os.Exit(12) + } + }() + return cmd, nil } From 0b12ceabe937076fb0582370fde6006f9f8986f4 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 20 Dec 2015 18:10:29 +0100 Subject: [PATCH 17/55] Dockerfile: Install go in home dir This allows cross-compilation with gox with Go < 1.5 --- Dockerfile | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index dc70958d1..ca00c649f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,25 +22,23 @@ ARG GOARCH=amd64 RUN apt-get update RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential -# download and install Go -RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz -RUN cd /var/lib && tar xf /tmp/go.tar.gz -RUN rm -f /tmp/go.tar.gz - -ENV GOROOT /var/lib/go -ENV PATH $PATH:$GOROOT/bin - # add and configure user ENV HOME /home/travis -ENV GOPATH $HOME/gopath -ENV PATH $PATH:$GOPATH/bin RUN useradd -m -d $HOME -s /bin/bash travis # run everything below as user travis USER travis WORKDIR $HOME -# make gopath +# download and install Go +RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz +RUN tar xf /tmp/go.tar.gz && rm -f /tmp/go.tar.gz +ENV GOROOT $HOME/go +ENV PATH $PATH:$GOROOT/bin + +ENV GOPATH $HOME/gopath +ENV PATH $PATH:$GOPATH/bin + RUN mkdir -p 
$GOPATH/src/github.com/restic/restic # install tools From 43cf95e3c65d0a26a6ad5949d3156df3ce0cd3ce Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 20 Dec 2015 20:42:17 +0100 Subject: [PATCH 18/55] Correctly stop the minio server after the tests --- run_integration_tests.go | 74 ++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 25 deletions(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index 349477985..62693b8da 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -5,7 +5,6 @@ package main import ( "bytes" "fmt" - "io" "io/ioutil" "os" "os/exec" @@ -13,6 +12,7 @@ import ( "regexp" "runtime" "strings" + "sync" ) type CIEnvironment interface { @@ -82,6 +82,12 @@ func goVersionAtLeast151() bool { return true } +type MinioServer struct { + cmd *exec.Cmd + done bool + m sync.Mutex +} + func (env *TravisEnvironment) RunTests() { // run fuse tests on darwin if runtime.GOOS != "darwin" { @@ -103,13 +109,13 @@ func (env *TravisEnvironment) RunTests() { run("go", "run", "build.go") var ( - testEnv map[string]string - minioCmd *exec.Cmd - err error + testEnv map[string]string + srv *MinioServer + err error ) if goVersionAtLeast151() { - minioCmd, err = runMinio() + srv, err = NewMinioServer() if err != nil { fmt.Fprintf(os.Stderr, "error running minio server: %v", err) os.Exit(8) @@ -121,15 +127,9 @@ func (env *TravisEnvironment) RunTests() { // run the tests and gather coverage information runWithEnv(testEnv, "gotestcover", "-coverprofile", "all.cov", "./...") - if minioCmd != nil { - err := minioCmd.Process.Kill() - if err != nil { - fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) - os.Exit(8) - } - } - runGofmt() + + srv.Stop() } type AppveyorEnvironment struct{} @@ -261,9 +261,9 @@ var minioEnv = map[string]string{ "AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe", } -// runMinio prepares and runs a minio server for the s3 backend tests in a -// temporary directory. -func runMinio() (*exec.Cmd, error) { +// NewMinioServer prepares and runs a minio server for the s3 backend tests in +// a temporary directory. 
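+// +// A usage sketch, mirroring RunTests above (Stop is safe to call even on a +// nil *MinioServer): +// srv, err := NewMinioServer() +// ... run the tests ... +// srv.Stop()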
+func NewMinioServer() (*MinioServer, error) { msg("running minio server\n") cfgdir, err := ioutil.TempDir("", "minio-config-") if err != nil { @@ -303,16 +303,40 @@ func runMinio() (*exec.Cmd, error) { return nil, err } - go func() { - err := cmd.Wait() - if err != nil { - fmt.Fprintf(os.Stderr, "error running minio server: %v, output:\n", err) - io.Copy(os.Stderr, out) - os.Exit(12) - } - }() + srv := &MinioServer{cmd: cmd} + go srv.Wait() - return cmd, nil + return srv, nil +} + +func (m *MinioServer) Stop() { + if m == nil { + return + } + + msg("stopping minio server\n") + m.m.Lock() + m.done = true + m.m.Unlock() + err := m.cmd.Process.Kill() + if err != nil { + fmt.Fprintf(os.Stderr, "error stopping minio server: %v", err) + os.Exit(8) + } +} + +func (m *MinioServer) Wait() { + err := m.cmd.Wait() + msg("minio server exited\n") + m.m.Lock() + done := m.done + m.m.Unlock() + + if err != nil && !done { + fmt.Fprintf(os.Stderr, "error running minio server: %#v, output:\n", err) + // io.Copy(os.Stderr, out) + os.Exit(12) + } } func isTravis() bool { From 566a15285ae1796c52597e4beccb08e6104c480c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 15:51:24 +0100 Subject: [PATCH 19/55] Add repository location parsing code --- backend/local/uri.go | 15 +++++++ backend/s3/uri.go | 51 ++++++++++++++++++++++++ backend/s3/uri_test.go | 33 ++++++++++++++++ backend/sftp/uri.go | 68 ++++++++++++++++++++++++++++++++ backend/sftp/uri_test.go | 52 ++++++++++++++++++++++++ uri/uri.go | 66 +++++++++++++++++++++++++++++++ uri/uri_test.go | 85 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 370 insertions(+) create mode 100644 backend/local/uri.go create mode 100644 backend/s3/uri.go create mode 100644 backend/s3/uri_test.go create mode 100644 backend/sftp/uri.go create mode 100644 backend/sftp/uri_test.go create mode 100644 uri/uri.go create mode 100644 uri/uri_test.go diff --git a/backend/local/uri.go b/backend/local/uri.go new file mode 100644 index 000000000..456f3c428 --- /dev/null +++ b/backend/local/uri.go @@ -0,0 +1,15 @@ +package local + +import ( + "errors" + "strings" +) + +// ParseConfig parses a local backend config. +func ParseConfig(cfg string) (interface{}, error) { + if !strings.HasPrefix(cfg, "local:") { + return nil, errors.New(`invalid format, prefix "local" not found`) + } + + return cfg[6:], nil +} diff --git a/backend/s3/uri.go b/backend/s3/uri.go new file mode 100644 index 000000000..808a9464b --- /dev/null +++ b/backend/s3/uri.go @@ -0,0 +1,51 @@ +package s3 + +import ( + "errors" + "strings" +) + +// Config contains all configuration necessary to connect to an s3 compatible +// server. +type Config struct { + Host string + KeyID, Secret string + Bucket string +} + +// ParseConfig parses the string s and extracts the s3 config. The two +// supported configuration formats are s3://host/bucketname and +// s3:host:bucketname. The host can also be a valid s3 region name. 
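+// +// For example (a sketch; the expected values come from the tests below): +// ParseConfig("s3://eu-central-1/bucketname") yields Config{Host: "eu-central-1", Bucket: "bucketname"}, +// ParseConfig("s3:hostname:foobar") yields Config{Host: "hostname", Bucket: "foobar"}.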
+func ParseConfig(s string) (interface{}, error) { + if strings.HasPrefix(s, "s3://") { + s = s[5:] + + data := strings.SplitN(s, "/", 2) + if len(data) != 2 { + return nil, errors.New("s3: invalid format, host/region or bucket name not found") + } + + cfg := Config{ + Host: data[0], + Bucket: data[1], + } + + return cfg, nil + } + + data := strings.SplitN(s, ":", 3) + if len(data) != 3 { + return nil, errors.New("s3: invalid format") + } + + if data[0] != "s3" { + return nil, errors.New(`s3: config does not start with "s3"`) + } + + cfg := Config{ + Host: data[1], + Bucket: data[2], + } + + return cfg, nil +} diff --git a/backend/s3/uri_test.go b/backend/s3/uri_test.go new file mode 100644 index 000000000..27f1f63a3 --- /dev/null +++ b/backend/s3/uri_test.go @@ -0,0 +1,33 @@ +package s3 + +import "testing" + +var uriTests = []struct { + s string + cfg Config +}{ + {"s3://eu-central-1/bucketname", Config{ + Host: "eu-central-1", + Bucket: "bucketname", + }}, + {"s3:hostname:foobar", Config{ + Host: "hostname", + Bucket: "foobar", + }}, +} + +func TestParseConfig(t *testing.T) { + for i, test := range uriTests { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Errorf("test %d failed: %v", i, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d: wrong config, want:\n %v\ngot:\n %v", + i, test.cfg, cfg) + continue + } + } +} diff --git a/backend/sftp/uri.go b/backend/sftp/uri.go new file mode 100644 index 000000000..ee241facb --- /dev/null +++ b/backend/sftp/uri.go @@ -0,0 +1,68 @@ +package sftp + +import ( + "errors" + "net/url" + "strings" +) + +// Config collects all information required to connect to an sftp server. +type Config struct { + User, Host, Dir string +} + +// ParseConfig extracts all information for the sftp connection from the string s. 
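+// +// Both supported formats, with the values the tests below expect: +// ParseConfig("sftp://user@host/dir/subdir") yields Config{User: "user", Host: "host", Dir: "dir/subdir"}, +// ParseConfig("sftp:foo@bar:/baz/quux") yields Config{User: "foo", Host: "bar", Dir: "/baz/quux"}.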
+func ParseConfig(s string) (interface{}, error) { + if strings.HasPrefix(s, "sftp://") { + return parseFormat1(s) + } + + // otherwise parse s in the second format, sftp:user@host:path + return parseFormat2(s) +} + +// parseFormat1 parses the first format, where the user specified +// "sftp://[user@]host/path": the string is parsed as a URL, from which user, +// host and path are extracted +func parseFormat1(s string) (Config, error) { + url, err := url.Parse(s) + if err != nil { + return Config{}, err + } + + cfg := Config{ + Host: url.Host, + Dir: url.Path[1:], + } + if url.User != nil { + cfg.User = url.User.Username() + } + return cfg, nil +} + +// parseFormat2 parses the second format, sftp:user@host:path +func parseFormat2(s string) (cfg Config, err error) { + // split user/host and path at the second colon + data := strings.SplitN(s, ":", 3) + if len(data) < 3 { + return Config{}, errors.New("sftp: invalid format, hostname or path not found") + } + + if data[0] != "sftp" { + return Config{}, errors.New(`invalid format, does not start with "sftp:"`) + } + + userhost := data[1] + cfg.Dir = data[2] + + data = strings.SplitN(userhost, "@", 2) + if len(data) == 2 { + cfg.User = data[0] + cfg.Host = data[1] + } else { + cfg.Host = userhost + } + + return cfg, nil +} diff --git a/backend/sftp/uri_test.go b/backend/sftp/uri_test.go new file mode 100644 index 000000000..400660be6 --- /dev/null +++ b/backend/sftp/uri_test.go @@ -0,0 +1,52 @@ +package sftp + +import "testing" + +var uriTests = []struct { + s string + cfg Config +}{ + // first form, user specified sftp://user@host/dir + { + "sftp://user@host/dir/subdir", + Config{User: "user", Host: "host", Dir: "dir/subdir"}, + }, + { + "sftp://host/dir/subdir", + Config{Host: "host", Dir: "dir/subdir"}, + }, + { + "sftp://host//dir/subdir", + Config{Host: "host", Dir: "/dir/subdir"}, + }, + + // second form, user specified sftp:user@host:/dir + { + "sftp:foo@bar:/baz/quux", + Config{User: "foo", Host: "bar", Dir: "/baz/quux"}, + }, + { + "sftp:bar:../baz/quux", + Config{Host: "bar", Dir: "../baz/quux"}, + }, + { + "sftp:fux@bar:baz/qu:ux", + Config{User: "fux", Host: "bar", Dir: "baz/qu:ux"}, + }, +} + +func TestParseConfig(t *testing.T) { + for i, test := range uriTests { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Errorf("test %d failed: %v", i, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d: wrong config, want:\n %v\ngot:\n %v", + i, test.cfg, cfg) + continue + } + } +} diff --git a/uri/uri.go b/uri/uri.go new file mode 100644 index 000000000..85d0c8a44 --- /dev/null +++ b/uri/uri.go @@ -0,0 +1,66 @@ +// Package uri implements parsing the restic repository location from a string. +package uri + +import ( + "strings" + + "github.com/restic/restic/backend/local" + "github.com/restic/restic/backend/s3" + "github.com/restic/restic/backend/sftp" +) + +// URI specifies the location of a repository, including the method of access +// and (possibly) credentials needed for access. +type URI struct { + Scheme string + Config interface{} +} + +type parser struct { + scheme string + parse func(string) (interface{}, error) +} + +// parsers is a list of valid config parsers for the backends. The first parser +// is the fallback and should always be set to the local backend. +var parsers = []parser{ + {"local", local.ParseConfig}, + {"sftp", sftp.ParseConfig}, + {"s3", s3.ParseConfig}, +} + +// ParseURI parses a repository location from the string s.
If s starts with a +// backend name followed by a colon, that backend's Parse() function is called. +// Otherwise, the local backend is used which interprets s as the name of a +// directory. +func ParseURI(s string) (u URI, err error) { + scheme := extractScheme(s) + u.Scheme = scheme + + for _, parser := range parsers { + if parser.scheme != scheme { + continue + } + + u.Config, err = parser.parse(s) + if err != nil { + return URI{}, err + } + + return u, nil + } + + // try again, with the local parser and the prefix "local:" + u.Scheme = "local" + u.Config, err = local.ParseConfig("local:" + s) + if err != nil { + return URI{}, err + } + + return u, nil +} + +func extractScheme(s string) string { + data := strings.SplitN(s, ":", 2) + return data[0] +} diff --git a/uri/uri_test.go b/uri/uri_test.go new file mode 100644 index 000000000..8aff27b51 --- /dev/null +++ b/uri/uri_test.go @@ -0,0 +1,85 @@ +package uri + +import ( + "reflect" + "testing" + + "github.com/restic/restic/backend/s3" + "github.com/restic/restic/backend/sftp" +) + +var parseTests = []struct { + s string + u URI +}{ + {"local:/srv/repo", URI{Scheme: "local", Config: "/srv/repo"}}, + {"local:dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, + {"local:dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, + {"dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, + {"local:../dir1/dir2", URI{Scheme: "local", Config: "../dir1/dir2"}}, + {"/dir1/dir2", URI{Scheme: "local", Config: "/dir1/dir2"}}, + + {"sftp:user@host:/srv/repo", URI{Scheme: "sftp", + Config: sftp.Config{ + User: "user", + Host: "host", + Dir: "/srv/repo", + }}}, + {"sftp:host:/srv/repo", URI{Scheme: "sftp", + Config: sftp.Config{ + User: "", + Host: "host", + Dir: "/srv/repo", + }}}, + {"sftp://user@host/srv/repo", URI{Scheme: "sftp", + Config: sftp.Config{ + User: "user", + Host: "host", + Dir: "srv/repo", + }}}, + {"sftp://user@host//srv/repo", URI{Scheme: "sftp", + Config: sftp.Config{ + User: "user", + Host: "host", + Dir: "/srv/repo", + }}}, + + {"s3://eu-central-1/bucketname", URI{Scheme: "s3", + Config: s3.Config{ + Host: "eu-central-1", + Bucket: "bucketname", + }}, + }, + {"s3://hostname.foo/bucketname", URI{Scheme: "s3", + Config: s3.Config{ + Host: "hostname.foo", + Bucket: "bucketname", + }}, + }, + {"s3:hostname.foo:repo", URI{Scheme: "s3", + Config: s3.Config{ + Host: "hostname.foo", + Bucket: "repo", + }}, + }, +} + +func TestParseURI(t *testing.T) { + for i, test := range parseTests { + u, err := ParseURI(test.s) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + + if test.u.Scheme != u.Scheme { + t.Errorf("test %d: scheme does not match, want %q, got %q", + i, test.u.Scheme, u.Scheme) + } + + if !reflect.DeepEqual(test.u.Config, u.Config) { + t.Errorf("test %d: cfg map does not match, want:\n %#v\ngot: \n %#v", + i, test.u.Config, u.Config) + } + } +} From de933a1d48f8d6deb0065371499708ac7edbb81f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 15:57:20 +0100 Subject: [PATCH 20/55] Rename URI -> Config/Location --- backend/local/{uri.go => config.go} | 0 backend/s3/{uri.go => config.go} | 0 backend/s3/{uri_test.go => config_test.go} | 4 +-- backend/sftp/{uri.go => config.go} | 0 backend/sftp/{uri_test.go => config_test.go} | 4 +-- uri/uri.go => location/location.go | 18 +++++------ uri/uri_test.go => location/location_test.go | 34 ++++++++++---------- 7 files changed, 30 insertions(+), 30 deletions(-) rename backend/local/{uri.go => config.go} (100%) rename backend/s3/{uri.go => 
config.go} (100%) rename backend/s3/{uri_test.go => config_test.go} (88%) rename backend/sftp/{uri.go => config.go} (100%) rename backend/sftp/{uri_test.go => config_test.go} (93%) rename uri/uri.go => location/location.go (72%) rename uri/uri_test.go => location/location_test.go (56%) diff --git a/backend/local/uri.go b/backend/local/config.go similarity index 100% rename from backend/local/uri.go rename to backend/local/config.go diff --git a/backend/s3/uri.go b/backend/s3/config.go similarity index 100% rename from backend/s3/uri.go rename to backend/s3/config.go diff --git a/backend/s3/uri_test.go b/backend/s3/config_test.go similarity index 88% rename from backend/s3/uri_test.go rename to backend/s3/config_test.go index 27f1f63a3..8821f9883 100644 --- a/backend/s3/uri_test.go +++ b/backend/s3/config_test.go @@ -2,7 +2,7 @@ package s3 import "testing" -var uriTests = []struct { +var configTests = []struct { s string cfg Config }{ @@ -17,7 +17,7 @@ var uriTests = []struct { } func TestParseConfig(t *testing.T) { - for i, test := range uriTests { + for i, test := range configTests { cfg, err := ParseConfig(test.s) if err != nil { t.Errorf("test %d failed: %v", i, err) diff --git a/backend/sftp/uri.go b/backend/sftp/config.go similarity index 100% rename from backend/sftp/uri.go rename to backend/sftp/config.go diff --git a/backend/sftp/uri_test.go b/backend/sftp/config_test.go similarity index 93% rename from backend/sftp/uri_test.go rename to backend/sftp/config_test.go index 400660be6..7d5399de4 100644 --- a/backend/sftp/uri_test.go +++ b/backend/sftp/config_test.go @@ -2,7 +2,7 @@ package sftp import "testing" -var uriTests = []struct { +var configTests = []struct { s string cfg Config }{ @@ -36,7 +36,7 @@ var uriTests = []struct { } func TestParseConfig(t *testing.T) { - for i, test := range uriTests { + for i, test := range configTests { cfg, err := ParseConfig(test.s) if err != nil { t.Errorf("test %d failed: %v", i, err) diff --git a/uri/uri.go b/location/location.go similarity index 72% rename from uri/uri.go rename to location/location.go index 85d0c8a44..a4b344000 100644 --- a/uri/uri.go +++ b/location/location.go @@ -1,5 +1,5 @@ -// Package uri implements parsing the restic repository location from a string. -package uri +// Package location implements parsing the restic repository location from a string. +package location import ( "strings" @@ -9,9 +9,9 @@ import ( "github.com/restic/restic/backend/sftp" ) -// URI specifies the location of a repository, including the method of access -// and (possibly) credentials needed for access. -type URI struct { +// Location specifies the location of a repository, including the method of +// access and (possibly) credentials needed for access. +type Location struct { Scheme string Config interface{} } @@ -29,11 +29,11 @@ var parsers = []parser{ {"s3", s3.ParseConfig}, } -// ParseURI parses a repository location from the string s. If s starts with a +// ParseLocation parses a repository location from the string s. If s starts with a // backend name followed by a colon, that backend's Parse() function is called. // Otherwise, the local backend is used which interprets s as the name of a // directory. 
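+// For example (a sketch; the expected values are from location_test.go below): +// ParseLocation("s3:hostname.foo:repo") yields Location{Scheme: "s3", Config: s3.Config{Host: "hostname.foo", Bucket: "repo"}}, +// while a bare path like "/dir1/dir2" falls back to Scheme "local" with Config "/dir1/dir2".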
-func ParseURI(s string) (u URI, err error) { +func ParseLocation(s string) (u Location, err error) { scheme := extractScheme(s) u.Scheme = scheme @@ -44,7 +44,7 @@ func ParseURI(s string) (u URI, err error) { u.Config, err = parser.parse(s) if err != nil { - return URI{}, err + return Location{}, err } return u, nil @@ -54,7 +54,7 @@ func ParseURI(s string) (u URI, err error) { u.Scheme = "local" u.Config, err = local.ParseConfig("local:" + s) if err != nil { - return URI{}, err + return Location{}, err } return u, nil diff --git a/uri/uri_test.go b/location/location_test.go similarity index 56% rename from uri/uri_test.go rename to location/location_test.go index 8aff27b51..096f0fa16 100644 --- a/uri/uri_test.go +++ b/location/location_test.go @@ -1,4 +1,4 @@ -package uri +package location import ( "reflect" @@ -10,53 +10,53 @@ import ( var parseTests = []struct { s string - u URI + u Location }{ - {"local:/srv/repo", URI{Scheme: "local", Config: "/srv/repo"}}, - {"local:dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, - {"local:dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, - {"dir1/dir2", URI{Scheme: "local", Config: "dir1/dir2"}}, - {"local:../dir1/dir2", URI{Scheme: "local", Config: "../dir1/dir2"}}, - {"/dir1/dir2", URI{Scheme: "local", Config: "/dir1/dir2"}}, + {"local:/srv/repo", Location{Scheme: "local", Config: "/srv/repo"}}, + {"local:dir1/dir2", Location{Scheme: "local", Config: "dir1/dir2"}}, + {"local:dir1/dir2", Location{Scheme: "local", Config: "dir1/dir2"}}, + {"dir1/dir2", Location{Scheme: "local", Config: "dir1/dir2"}}, + {"local:../dir1/dir2", Location{Scheme: "local", Config: "../dir1/dir2"}}, + {"/dir1/dir2", Location{Scheme: "local", Config: "/dir1/dir2"}}, - {"sftp:user@host:/srv/repo", URI{Scheme: "sftp", + {"sftp:user@host:/srv/repo", Location{Scheme: "sftp", Config: sftp.Config{ User: "user", Host: "host", Dir: "/srv/repo", }}}, - {"sftp:host:/srv/repo", URI{Scheme: "sftp", + {"sftp:host:/srv/repo", Location{Scheme: "sftp", Config: sftp.Config{ User: "", Host: "host", Dir: "/srv/repo", }}}, - {"sftp://user@host/srv/repo", URI{Scheme: "sftp", + {"sftp://user@host/srv/repo", Location{Scheme: "sftp", Config: sftp.Config{ User: "user", Host: "host", Dir: "srv/repo", }}}, - {"sftp://user@host//srv/repo", URI{Scheme: "sftp", + {"sftp://user@host//srv/repo", Location{Scheme: "sftp", Config: sftp.Config{ User: "user", Host: "host", Dir: "/srv/repo", }}}, - {"s3://eu-central-1/bucketname", URI{Scheme: "s3", + {"s3://eu-central-1/bucketname", Location{Scheme: "s3", Config: s3.Config{ Host: "eu-central-1", Bucket: "bucketname", }}, }, - {"s3://hostname.foo/bucketname", URI{Scheme: "s3", + {"s3://hostname.foo/bucketname", Location{Scheme: "s3", Config: s3.Config{ Host: "hostname.foo", Bucket: "bucketname", }}, }, - {"s3:hostname.foo:repo", URI{Scheme: "s3", + {"s3:hostname.foo:repo", Location{Scheme: "s3", Config: s3.Config{ Host: "hostname.foo", Bucket: "repo", @@ -64,9 +64,9 @@ var parseTests = []struct { }, } -func TestParseURI(t *testing.T) { +func TestParseLocation(t *testing.T) { for i, test := range parseTests { - u, err := ParseURI(test.s) + u, err := ParseLocation(test.s) if err != nil { t.Errorf("unexpected error: %v", err) continue From 3d2a714b5a0ffc5611e44254c2217d8ec6977c34 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 16:19:34 +0100 Subject: [PATCH 21/55] Update minio-go library --- Godeps/Godeps.json | 1 + .../src/github.com/minio/minio-go/api-core.go | 61 ++- .../minio/minio-go/api-multipart-core.go | 8 +- 
.../src/github.com/minio/minio-go/api.go | 82 ++- .../minio/minio-go/api_private_test.go | 18 + .../minio/minio-go/api_public_test.go | 2 +- .../src/github.com/minio/minio-go/errors.go | 2 +- .../src/github.com/minio/minio-go/io.go | 67 +++ .../minio/minio-go/request-common.go | 283 ++++++++++ .../github.com/minio/minio-go/request-v2.go | 248 +++++++++ .../github.com/minio/minio-go/request-v4.go | 228 ++++++++ .../src/github.com/minio/minio-go/request.go | 498 ------------------ 12 files changed, 961 insertions(+), 537 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/io.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-common.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b818dc01e..ec964d080 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,6 +24,7 @@ }, { "ImportPath": "github.com/minio/minio-go", + "Comment": "v0.2.5-62-g61f6570", "Rev": "61f6570da0edd761974216c9ed5da485d3cc0c99" }, { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go index fd9c23a45..73fffbd29 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go @@ -209,10 +209,14 @@ func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) { if err != nil { return accessControlPolicy{}, err } + // In-case of google private bucket policy doesn't have any Grant list + if a.config.Region == "google" { + return policy, nil + } if policy.AccessControlList.Grant == nil { errorResponse := ErrorResponse{ Code: "InternalError", - Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues", + Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.", Resource: separator + bucket, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -371,7 +375,7 @@ func (a apiCore) headBucket(bucket string) error { case http.StatusForbidden: errorResponse = ErrorResponse{ Code: "AccessDenied", - Message: "Access Denied", + Message: "Access Denied.", Resource: separator + bucket, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -434,7 +438,7 @@ func (a apiCore) deleteBucket(bucket string) error { case http.StatusForbidden: errorResponse = ErrorResponse{ Code: "AccessDenied", - Message: "Access Denied", + Message: "Access Denied.", Resource: separator + bucket, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -442,7 +446,7 @@ func (a apiCore) deleteBucket(bucket string) error { case http.StatusConflict: errorResponse = ErrorResponse{ Code: "Conflict", - Message: "Bucket not empty", + Message: "Bucket not empty.", Resource: separator + bucket, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -520,7 +524,9 @@ func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumByte return nil, err } // set Content-MD5 as base64 encoded md5 - r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) + if md5SumBytes != nil { + r.Set("Content-MD5", 
base64.StdEncoding.EncodeToString(md5SumBytes)) + } r.Set("Content-Type", contentType) r.req.ContentLength = size return r, nil @@ -552,6 +558,13 @@ func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string { t := time.Now().UTC() r := new(request) r.config = a.config + if r.config.Signature.isV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + p.formData["AWSAccessKeyId"] = r.config.AccessKeyID + p.formData["signature"] = r.PostPresignSignatureV2(policyBase64) + return p.formData + } credential := getCredential(r.config.AccessKeyID, r.config.Region, t) p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)}) p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader}) @@ -562,7 +575,7 @@ func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string { p.formData["x-amz-algorithm"] = authHeader p.formData["x-amz-credential"] = credential p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = r.PostPresignSignature(policyBase64, t) + p.formData["x-amz-signature"] = r.PostPresignSignatureV4(policyBase64, t) return p.formData } @@ -572,10 +585,13 @@ func (a apiCore) presignedPutObject(bucket, object string, expires int64) (strin HTTPMethod: "PUT", HTTPPath: separator + bucket + separator + object, } - r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10)) + r, err := newPresignedRequest(op, a.config, expires) if err != nil { return "", err } + if r.config.Signature.isV2() { + return r.PreSignV2() + } return r.PreSignV4() } @@ -585,7 +601,7 @@ func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offse HTTPMethod: "GET", HTTPPath: separator + bucket + separator + object, } - r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10)) + r, err := newPresignedRequest(op, a.config, expires) if err != nil { return nil, err } @@ -604,11 +620,14 @@ func (a apiCore) presignedGetObject(bucket, object string, expires, offset, leng if err := invalidArgumentError(object); err != nil { return "", err } - req, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length) + r, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length) if err != nil { return "", err } - return req.PreSignV4() + if r.config.Signature.isV2() { + return r.PreSignV2() + } + return r.PreSignV4() } // getObjectRequest wrapper creates a new getObject request @@ -623,12 +642,13 @@ func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) ( return nil, err } switch { - case length > 0 && offset > 0: + case length > 0 && offset >= 0: r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) case offset > 0 && length == 0: r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - case length > 0 && offset == 0: - r.Set("Range", fmt.Sprintf("bytes=-%d", length)) + // The final length bytes + case length < 0 && offset == 0: + r.Set("Range", fmt.Sprintf("bytes=%d", length)) } return r, nil } @@ -638,7 +658,8 @@ func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) ( // Additionally this function also takes range arguments to download the specified // range bytes of an object. Setting offset and length = 0 will download the full object. // -// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. 
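+// As a sketch of the byte ranges produced by getObjectRequest above: +// offset=0, length=100 yields "Range: bytes=0-99"; offset=100, length=0 +// yields "Range: bytes=100-"; and offset=0, length=-100 yields +// "Range: bytes=-100", i.e. the final 100 bytes.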
func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) { if err := invalidArgumentError(object); err != nil { return nil, ObjectStat{}, err @@ -664,7 +685,7 @@ func (a apiCore) getObject(bucket, object string, offset, length int64) (io.Read if err != nil { return nil, ObjectStat{}, ErrorResponse{ Code: "InternalError", - Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues", + Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), } @@ -726,7 +747,7 @@ func (a apiCore) deleteObject(bucket, object string) error { case http.StatusForbidden: errorResponse = ErrorResponse{ Code: "AccessDenied", - Message: "Access Denied", + Message: "Access Denied.", Resource: separator + bucket + separator + object, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -788,7 +809,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) { case http.StatusForbidden: errorResponse = ErrorResponse{ Code: "AccessDenied", - Message: "Access Denied", + Message: "Access Denied.", Resource: separator + bucket + separator + object, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), @@ -811,7 +832,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) { if err != nil { return ObjectStat{}, ErrorResponse{ Code: "InternalError", - Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues", + Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues.", RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), } @@ -820,7 +841,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) { if err != nil { return ObjectStat{}, ErrorResponse{ Code: "InternalError", - Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues", + Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), } @@ -867,7 +888,7 @@ func (a apiCore) listBuckets() (listAllMyBucketsResult, error) { if resp.StatusCode == http.StatusTemporaryRedirect { return listAllMyBucketsResult{}, ErrorResponse{ Code: "AccessDenied", - Message: "Anonymous access is forbidden for this operation", + Message: "Anonymous access is forbidden for this operation.", RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go index 05e1f74c7..1236058cd 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go @@ -221,14 +221,14 @@ func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error { case http.StatusForbidden: errorResponse = ErrorResponse{ Code: "AccessDenied", - Message: "Access Denied", + Message: "Access Denied.", Resource: separator + bucket + separator + object, RequestID: resp.Header.Get("x-amz-request-id"), } default: 
errorResponse = ErrorResponse{ Code: resp.Status, - Message: "", + Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.", Resource: separator + bucket + separator + object, RequestID: resp.Header.Get("x-amz-request-id"), } @@ -299,7 +299,9 @@ func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes return nil, err } // set Content-MD5 as base64 encoded md5 - r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) + if md5SumBytes != nil { + r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) + } r.req.ContentLength = size return r, nil } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go index f74f7c574..27ad4ca94 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go @@ -149,6 +149,9 @@ var regions = map[string]string{ "s3-ap-northeast-1.amazonaws.com": "ap-northeast-1", "s3-sa-east-1.amazonaws.com": "sa-east-1", "s3.cn-north-1.amazonaws.com.cn": "cn-north-1", + + // Add google cloud storage as one of the regions + "storage.googleapis.com": "google", } // getRegion returns a region based on its endpoint mapping. @@ -161,12 +164,38 @@ func getRegion(host string) (region string) { return "milkyway" } +// SignatureType is type of signature to be used for a request +type SignatureType int + +// Different types of supported signatures - default is Latest i.e SignatureV4 +const ( + Latest SignatureType = iota + SignatureV4 + SignatureV2 +) + +// isV2 - is signature SignatureV2? +func (s SignatureType) isV2() bool { + return s == SignatureV2 +} + +// isV4 - is signature SignatureV4? +func (s SignatureType) isV4() bool { + return s == SignatureV4 +} + +// isLatest - is signature Latest? +func (s SignatureType) isLatest() bool { + return s == Latest +} + // Config - main configuration struct used by all to set endpoint, credentials, and other options for requests. type Config struct { // Standard options AccessKeyID string SecretAccessKey string Endpoint string + Signature SignatureType // Advanced options // Specify this to get server response in non XML style if server supports it @@ -234,13 +263,37 @@ func New(config Config) (API, error) { hostSplits := strings.SplitN(u.Host, ".", 2) u.Host = hostSplits[1] } + matchGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host) + if matchGoogle { + config.isVirtualStyle = true + hostSplits := strings.SplitN(u.Host, ".", 2) + u.Host = hostSplits[1] + } config.Region = getRegion(u.Host) + if config.Region == "google" { + // Google cloud storage is signature V2 + config.Signature = SignatureV2 + } } config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH) config.isUserAgentSet = false // default return api{apiCore{&config}}, nil } +// PresignedPostPolicy return POST form data that can be used for object upload +func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { + if p.expiration.IsZero() { + return nil, errors.New("Expiration time must be specified") + } + if _, ok := p.formData["key"]; !ok { + return nil, errors.New("object key must be specified") + } + if _, ok := p.formData["bucket"]; !ok { + return nil, errors.New("bucket name must be specified") + } + return a.presignedPostPolicy(p), nil +} + /// Object operations /// Expires maximum is 7days - ie. 
604800 and minimum is 1 @@ -549,20 +602,6 @@ func (a api) continueObjectUpload(bucket, object, uploadID string, size int64, d return nil } -// PresignedPostPolicy return POST form data that can be used for object upload -func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { - if p.expiration.IsZero() { - return nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, errors.New("bucket name must be specified") - } - return a.presignedPostPolicy(p), nil -} - // PutObject create an object in a bucket // // You must have WRITE permissions on a bucket to create an object @@ -588,6 +627,21 @@ func (a api) PutObject(bucket, object, contentType string, size int64, data io.R return nil } } + // Special handling just for Google Cloud Storage. + // TODO - we should remove this in future when we fully implement Resumable object upload. + if a.config.Region == "google" { + if size > maxPartSize { + return ErrorResponse{ + Code: "EntityTooLarge", + Message: "Your proposed upload exceeds the maximum allowed object size.", + Resource: separator + bucket + separator + object, + } + } + if _, err := a.putObject(bucket, object, contentType, nil, size, ReadSeekCloser(data)); err != nil { + return err + } + return nil + } switch { case size < minimumPartSize && size > 0: // Single Part use case, use PutObject directly diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go index b220744b2..23d1832a2 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go @@ -21,6 +21,24 @@ import ( "testing" ) +func TestSignature(t *testing.T) { + conf := new(Config) + if !conf.Signature.isLatest() { + t.Fatalf("Error") + } + conf.Signature = SignatureV2 + if !conf.Signature.isV2() { + t.Fatalf("Error") + } + if conf.Signature.isV4() { + t.Fatalf("Error") + } + conf.Signature = SignatureV4 + if !conf.Signature.isV4() { + t.Fatalf("Error") + } +} + func TestACLTypes(t *testing.T) { want := map[string]bool{ "private": true, diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go index cf82c4812..674f5d770 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go @@ -51,7 +51,7 @@ func TestBucketOperations(t *testing.T) { if err == nil { t.Fatal("Error") } - if err.Error() != "Access Denied" { + if err.Error() != "Access Denied." 
{ t.Fatal("Error") } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go index 5626cf0c6..b85e36e51 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go @@ -152,7 +152,7 @@ func invalidObjectError(object string) error { func invalidArgumentError(arg string) error { errorResponse := ErrorResponse{ Code: "InvalidArgument", - Message: "Invalid Argument", + Message: "Invalid Argument.", RequestID: "minio", } if strings.TrimSpace(arg) == "" || arg == "" { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/io.go b/Godeps/_workspace/src/github.com/minio/minio-go/io.go new file mode 100644 index 000000000..71b4363a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/io.go @@ -0,0 +1,67 @@ +package minio + +import "io" + +// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads up to len(p) bytes into p. It returns the number of bytes +// read (0 <= n <= len(p)) and any error encountered. Even if Read +// returns n < len(p), it may use all of p as scratch space during the call. +// If some data is available but not len(p) bytes, Read conventionally +// returns what is available instead of waiting for more. +// +// When Read encounters an error or end-of-file condition after +// successfully reading n > 0 bytes, it returns the number of +// bytes read. It may return the (non-nil) error from the same call +// or return the error (and n == 0) from a subsequent call. +// An instance of this general case is that a Reader returning +// a non-zero number of bytes at the end of the input stream may +// return either err == EOF or err == nil. The next Read should +// return 0, EOF. +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the start of +// the file, 1 means relative to the current offset, and 2 means +// relative to the end. Seek returns the new offset relative to the +// start of the file and an error, if any. +// +// Seeking to an offset before the start of the file is an error. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// The behavior of Close after the first call is undefined. +// Specific implementations may document their own behavior. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. 
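+// +// For example (a sketch): wrapping a bytes.Reader, which implements io.Seeker +// but not io.Closer, as r := ReadSeekCloser(bytes.NewReader(data)) means that +// r.Seek(0, 0) delegates to bytes.Reader.Seek while r.Close() is a no-op and +// returns nil.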
+func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go new file mode 100644 index 000000000..c63c16a13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go @@ -0,0 +1,283 @@ +package minio + +import ( + "encoding/hex" + "io" + "io/ioutil" + "net/http" + "regexp" + "strings" + "unicode/utf8" +) + +// operation - rest operation +type operation struct { + HTTPServer string + HTTPMethod string + HTTPPath string +} + +// request - a http request +type request struct { + req *http.Request + config *Config + body io.ReadSeeker + expires int64 +} + +// Do - start the request +func (r *request) Do() (resp *http.Response, err error) { + if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" { + if r.config.Signature.isV2() { + r.SignV2() + } + if r.config.Signature.isV4() || r.config.Signature.isLatest() { + r.SignV4() + } + } + transport := http.DefaultTransport + if r.config.Transport != nil { + transport = r.config.Transport + } + // do not use http.Client{}, while it may seem intuitive but the problem seems to be + // that http.Client{} internally follows redirects and there is no easier way to disable + // it from outside using a configuration parameter - + // this auto redirect causes complications in verifying subsequent errors + // + // The best is to use RoundTrip() directly, so the request comes back to the caller where + // we are going to handle such replies. And indeed that is the right thing to do here. + // + return transport.RoundTrip(r.req) +} + +// Set - set additional headers if any +func (r *request) Set(key, value string) { + r.req.Header.Set(key, value) +} + +// Get - get header values +func (r *request) Get(key string) string { + return r.req.Header.Get(key) +} + +func path2BucketAndObject(path string) (bucketName, objectName string) { + pathSplits := strings.SplitN(path, "?", 2) + splits := strings.SplitN(pathSplits[0], separator, 3) + switch len(splits) { + case 0, 1: + bucketName = "" + objectName = "" + case 2: + bucketName = splits[1] + objectName = "" + case 3: + bucketName = splits[1] + objectName = splits[2] + } + return bucketName, objectName +} + +// path2Object gives objectName from URL path +func path2Object(path string) (objectName string) { + _, objectName = path2BucketAndObject(path) + return +} + +// path2Bucket gives bucketName from URL path +func path2Bucket(path string) (bucketName string) { + bucketName, _ = path2BucketAndObject(path) + return +} + +// path2Query gives query part from URL path +func path2Query(path string) (query string) { + pathSplits := strings.SplitN(path, "?", 2) + if len(pathSplits) > 1 { + query = pathSplits[1] + } + return +} + +// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
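+// +// For instance (a sketch of the expected output): +// getURLEncodedPath("photos/2015/übung.jpg") returns "photos/2015/%C3%BCbung.jpg", +// since only the non-ASCII rune needs escaping.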
+func getURLEncodedPath(pathName string) string { + // if object matches reserved string, no need to encode them + reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + if reservedNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +func (op *operation) getRequestURL(config Config) (url string) { + // parse URL for the combination of HTTPServer + HTTPPath + url = op.HTTPServer + separator + if !config.isVirtualStyle { + url += path2Bucket(op.HTTPPath) + } + objectName := getURLEncodedPath(path2Object(op.HTTPPath)) + queryPath := path2Query(op.HTTPPath) + if objectName == "" && queryPath != "" { + url += "?" + queryPath + return + } + if objectName != "" && queryPath == "" { + if strings.HasSuffix(url, separator) { + url += objectName + } else { + url += separator + objectName + } + return + } + if objectName != "" && queryPath != "" { + if strings.HasSuffix(url, separator) { + url += objectName + "?" + queryPath + } else { + url += separator + objectName + "?" + queryPath + } + } + return +} + +func newPresignedRequest(op *operation, config *Config, expires int64) (*request, error) { + // if no method default to POST + method := op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // save for subsequent use + r := new(request) + r.config = config + r.expires = expires + r.req = req + r.body = nil + + return r, nil +} + +// newUnauthenticatedRequest - instantiate a new unauthenticated request +func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) { + // if no method default to POST + method := op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // add body + switch { + case body == nil: + req.Body = nil + default: + req.Body = ioutil.NopCloser(body) + } + + // save for subsequent use + r := new(request) + r.req = req + r.config = config + + return r, nil +} + +// newRequest - instantiate a new request +func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) { + // if no method default to POST + method := 
op.HTTPMethod + if method == "" { + method = "POST" + } + + u := op.getRequestURL(*config) + + // get a new HTTP request, for the requested method + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + // set UserAgent + req.Header.Set("User-Agent", config.userAgent) + + // set Accept header for response encoding style, if available + if config.AcceptType != "" { + req.Header.Set("Accept", config.AcceptType) + } + + // add body + switch { + case body == nil: + req.Body = nil + default: + req.Body = ioutil.NopCloser(body) + } + + // save for subsequent use + r := new(request) + r.config = config + r.req = req + r.body = body + + return r, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go new file mode 100644 index 000000000..aac4066b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go @@ -0,0 +1,248 @@ +/* + * Minio Go Library for Amazon S3 Legacy v2 Signature Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} +func (r *request) PreSignV2() (string, error) { + if r.config.AccessKeyID == "" || r.config.SecretAccessKey == "" { + return "", errors.New("presign requires accesskey and secretkey") + } + // Add date if not present + d := time.Now().UTC() + if date := r.Get("Date"); date == "" { + r.Set("Date", d.Format(http.TimeFormat)) + } + epochExpires := d.Unix() + r.expires + var path string + if r.config.isVirtualStyle { + for k, v := range regions { + if v == r.config.Region { + path = "/" + strings.TrimSuffix(r.req.URL.Host, "."+k) + path += r.req.URL.Path + path = getURLEncodedPath(path) + break + } + } + } else { + path = getURLEncodedPath(r.req.URL.Path) + } + signText := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path) + hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm.Write([]byte(signText)) + + query := r.req.URL.Query() + query.Set("AWSAccessKeyId", r.config.AccessKeyID) + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + query.Set("Signature", base64.StdEncoding.EncodeToString(hm.Sum(nil))) + r.req.URL.RawQuery = query.Encode() + + return r.req.URL.String(), nil +} + +func (r *request) PostPresignSignatureV2(policyBase64 string) string { + hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// 
Content-MD5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// SignV2 the request before Do() (version 2.0) +func (r *request) SignV2() { + // Add date if not present + if date := r.Get("Date"); date == "" { + r.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + } + // Calculate HMAC for secretAccessKey + hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm.Write([]byte(r.getStringToSignV2())) + + // prepare auth header + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("AWS %s:", r.config.AccessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header + r.req.Header.Set("Authorization", authHeader.String()) +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-MD5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func (r *request) getStringToSignV2() string { + buf := new(bytes.Buffer) + // write standard headers + r.writeDefaultHeaders(buf) + // write canonicalized protocol headers if any + r.writeCanonicalizedHeaders(buf) + // write canonicalized Query resources if any + r.writeCanonicalizedResource(buf) + return buf.String() +} + +func (r *request) writeDefaultHeaders(buf *bytes.Buffer) { + buf.WriteString(r.req.Method) + buf.WriteByte('\n') + buf.WriteString(r.req.Header.Get("Content-MD5")) + buf.WriteByte('\n') + buf.WriteString(r.req.Header.Get("Content-Type")) + buf.WriteByte('\n') + buf.WriteString(r.req.Header.Get("Date")) + buf.WriteByte('\n') +} + +func (r *request) writeCanonicalizedHeaders(buf *bytes.Buffer) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range r.req.Header { + // all the AMZ and GOOG headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + if strings.Contains(v, "\n") { + // TODO: "Unfold" long headers that + // span multiple lines (as allowed by + // RFC 2616, section 4.2) by replacing + // the folding white-space (including + // new-line) by a single space. + buf.WriteString(v) + } else { + buf.WriteString(v) + } + } + buf.WriteByte('\n') + } +} + +// Must be sorted: +var resourceList = []string{ + "acl", + "location", + "logging", + "notification", + "partNumber", + "policy", + "response-content-type", + "response-content-language", + "response-expires", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "requestPayment", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ sub-resource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; +func (r *request) writeCanonicalizedResource(buf *bytes.Buffer) error { + requestURL := r.req.URL + if r.config.isVirtualStyle { + for k, v := range regions { + if v == r.config.Region { + path := "/" + strings.TrimSuffix(requestURL.Host, "."+k) + path += requestURL.Path + buf.WriteString(getURLEncodedPath(path)) + break + } + } + } else { + buf.WriteString(getURLEncodedPath(requestURL.Path)) + } + sort.Strings(resourceList) + if requestURL.RawQuery != "" { + var n int + vals, _ := url.ParseQuery(requestURL.RawQuery) + // loop through all the supported resourceList + for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // first element + switch n { + case 1: + buf.WriteByte('?') + // the rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(vv[0])) + } + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go new file mode 100644 index 000000000..09ef06a9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go @@ -0,0 +1,228 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "errors" + "net/http" + "sort" + "strconv" + "strings" + "time" +) + +const ( + authHeader = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes problems with generating pre-signed URLs +/// (that are executed by other agents) or when customers pass requests through proxies, which may +/// modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed URL should not provide a content-length +/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when +/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which +/// implicitly validates the payload length (since changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in browser environments, where browsers +/// like to modify and normalize the content-type header in different ways. There is more information +/// on this in https://github.com/aws/aws-sdk-js/issues/244. 
Avoiding this field simplifies logic +/// and reduces the possibility of future bugs +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload +func (r *request) getHashedPayload() string { + hash := func() string { + switch { + case r.expires != 0: + return "UNSIGNED-PAYLOAD" + case r.body == nil: + return hex.EncodeToString(sum256([]byte{})) + default: + sum256Bytes, _ := sum256Reader(r.body) + return hex.EncodeToString(sum256Bytes) + } + } + hashedPayload := hash() + if hashedPayload != "UNSIGNED-PAYLOAD" { + r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload) + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers with their values +func (r *request) getCanonicalHeaders() string { + var headers []string + vals := make(map[string][]string) + for k, vv := range r.req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(r.req.URL.Host) + fallthrough + default: + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + return buf.String() +} + +// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func (r *request) getSignedHeaders() string { + var headers []string + for k := range r.req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + headers = append(headers, "host") + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalRequest generate a canonical request of style +// +// canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// +// +func (r *request) getCanonicalRequest(hashedPayload string) string { + r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) + canonicalRequest := strings.Join([]string{ + r.req.Method, + getURLEncodedPath(r.req.URL.Path), + r.req.URL.RawQuery, + r.getCanonicalHeaders(), + r.getSignedHeaders(), + hashedPayload, + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values +func (r *request) getStringToSignV4(canonicalRequest string, t time.Time) string { + stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" + stringToSign = stringToSign + getScope(r.config.Region, t) + "\n" + stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + return stringToSign +} + +// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (r *request) PreSignV4() (string, error) { + if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" { + return "", errors.New("presign requires accesskey and secretkey") + } + r.SignV4() + return r.req.URL.String(), nil +} + +func (r *request) PostPresignSignatureV4(policyBase64 string, t time.Time) string { + signingkey := getSigningKey(r.config.SecretAccessKey, 
r.config.Region, t) + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (r *request) SignV4() { + query := r.req.URL.Query() + if r.expires != 0 { + query.Set("X-Amz-Algorithm", authHeader) + } + t := time.Now().UTC() + // Add date if not present + if r.expires != 0 { + query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + query.Set("X-Amz-Expires", strconv.FormatInt(r.expires, 10)) + } else { + r.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + } + + hashedPayload := r.getHashedPayload() + signedHeaders := r.getSignedHeaders() + if r.expires != 0 { + query.Set("X-Amz-SignedHeaders", signedHeaders) + } + credential := getCredential(r.config.AccessKeyID, r.config.Region, t) + if r.expires != 0 { + query.Set("X-Amz-Credential", credential) + r.req.URL.RawQuery = query.Encode() + } + canonicalRequest := r.getCanonicalRequest(hashedPayload) + stringToSign := r.getStringToSignV4(canonicalRequest, t) + signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) + signature := getSignature(signingKey, stringToSign) + + if r.expires != 0 { + r.req.URL.RawQuery += "&X-Amz-Signature=" + signature + } else { + // final Authorization header + parts := []string{ + authHeader + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + r.Set("Authorization", auth) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request.go b/Godeps/_workspace/src/github.com/minio/minio-go/request.go deleted file mode 100644 index 74d59b624..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/request.go +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "encoding/hex" - "errors" - "io" - "io/ioutil" - "net/http" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" -) - -// operation - rest operation -type operation struct { - HTTPServer string - HTTPMethod string - HTTPPath string -} - -// request - a http request -type request struct { - req *http.Request - config *Config - body io.ReadSeeker - expires string -} - -const ( - authHeader = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" - yyyymmdd = "20060102" -) - -/// -/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes problems with generating pre-signed URLs -/// (that are executed by other agents) or when customers pass requests through proxies, which may -/// modify the user-agent. 
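(Aside: the V4 helpers referenced above — getSigningKey, getSignature, getScope, getCredential — are defined elsewhere in minio-go and do not appear in this diff. A minimal sketch of the standard AWS Signature Version 4 key derivation they presumably implement, HMAC-SHA256 chained over date, region, service and the literal "aws4_request":)

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey derives the per-day, per-region SigV4 key; the scope string
// that pairs with it has the form "<yyyymmdd>/<region>/s3/aws4_request".
func signingKey(secret, region string, t time.Time) []byte {
	dateKey := hmacSHA256([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	regionKey := hmacSHA256(dateKey, []byte(region))
	serviceKey := hmacSHA256(regionKey, []byte("s3"))
	return hmacSHA256(serviceKey, []byte("aws4_request"))
}

func main() {
	key := signingKey("SECRET", "us-east-1", time.Now().UTC())
	fmt.Println(hex.EncodeToString(hmacSHA256(key, []byte("string-to-sign"))))
}
```

Because the derived key is scoped to a single day, region and service, SignV4 can recompute it cheaply for every request rather than caching it.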
-/// -/// Content-Length: -/// -/// This is ignored from signing because generating a pre-signed URL should not provide a content-length -/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when -/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which -/// implicitly validates the payload length (since changing the number of bytes would change the checksum) -/// and therefore this header is not valuable in the signature. -/// -/// Content-Type: -/// -/// Signing this header causes quite a number of problems in browser environments, where browsers -/// like to modify and normalize the content-type header in different ways. There is more information -/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic -/// and reduces the possibility of future bugs -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// -var ignoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func getURLEncodedPath(pathName string) string { - // if object matches reserved string, no need to encode them - reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - if reservedNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -func path2BucketAndObject(path string) (bucketName, objectName string) { - pathSplits := strings.SplitN(path, "?", 2) - splits := strings.SplitN(pathSplits[0], separator, 3) - switch len(splits) { - case 0, 1: - bucketName = "" - objectName = "" - case 2: - bucketName = splits[1] - objectName = "" - case 3: - bucketName = splits[1] - objectName = splits[2] - } - return bucketName, objectName -} - -// path2Object gives objectName from URL path -func path2Object(path string) (objectName string) { - _, objectName = path2BucketAndObject(path) - return -} - -// path2Bucket gives bucketName from URL path -func path2Bucket(path string) (bucketName string) { - bucketName, _ = path2BucketAndObject(path) - return -} - -// path2Query gives query part from URL path -func path2Query(path string) (query string) { - pathSplits := strings.SplitN(path, "?", 2) - if len(pathSplits) > 1 { - query = pathSplits[1] - } - return -} - -func (op *operation) getRequestURL(config Config) (url string) { - // parse URL for the combination of HTTPServer + 
HTTPPath - url = op.HTTPServer + separator - if !config.isVirtualStyle { - url += path2Bucket(op.HTTPPath) - } - objectName := getURLEncodedPath(path2Object(op.HTTPPath)) - queryPath := path2Query(op.HTTPPath) - if objectName == "" && queryPath != "" { - url += "?" + queryPath - return - } - if objectName != "" && queryPath == "" { - if strings.HasSuffix(url, separator) { - url += objectName - } else { - url += separator + objectName - } - return - } - if objectName != "" && queryPath != "" { - if strings.HasSuffix(url, separator) { - url += objectName + "?" + queryPath - } else { - url += separator + objectName + "?" + queryPath - } - } - return -} - -func newPresignedRequest(op *operation, config *Config, expires string) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // save for subsequent use - r := new(request) - r.config = config - r.expires = expires - r.req = req - r.body = nil - - return r, nil -} - -// newUnauthenticatedRequest - instantiate a new unauthenticated request -func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // add body - switch { - case body == nil: - req.Body = nil - default: - req.Body = ioutil.NopCloser(body) - } - - // save for subsequent use - r := new(request) - r.req = req - r.config = config - - return r, nil -} - -// newRequest - instantiate a new request -func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // add body - switch { - case body == nil: - req.Body = nil - default: - req.Body = ioutil.NopCloser(body) - } - - // save for subsequent use - r := new(request) - r.config = config - r.req = req - r.body = body - - return r, nil -} - -// Do - start the request -func (r *request) Do() (resp *http.Response, err error) { - if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" { - r.SignV4() - } - transport := http.DefaultTransport - if r.config.Transport != nil { - transport = r.config.Transport - } - // do not use http.Client{}, while it may seem intuitive but the problem seems to be - // that http.Client{} internally 
follows redirects and there is no easier way to disable - // it from outside using a configuration parameter - - // this auto redirect causes complications in verifying subsequent errors - // - // The best is to use RoundTrip() directly, so the request comes back to the caller where - // we are going to handle such replies. And indeed that is the right thing to do here. - // - return transport.RoundTrip(r.req) -} - -// Set - set additional headers if any -func (r *request) Set(key, value string) { - r.req.Header.Set(key, value) -} - -// Get - get header values -func (r *request) Get(key string) string { - return r.req.Header.Get(key) -} - -// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload -func (r *request) getHashedPayload() string { - hash := func() string { - switch { - case r.expires != "": - return "UNSIGNED-PAYLOAD" - case r.body == nil: - return hex.EncodeToString(sum256([]byte{})) - default: - sum256Bytes, _ := sum256Reader(r.body) - return hex.EncodeToString(sum256Bytes) - } - } - hashedPayload := hash() - if hashedPayload != "UNSIGNED-PAYLOAD" { - r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload) - } - return hashedPayload -} - -// getCanonicalHeaders generate a list of request headers with their values -func (r *request) getCanonicalHeaders() string { - var headers []string - vals := make(map[string][]string) - for k, vv := range r.req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - vals[strings.ToLower(k)] = vv - } - headers = append(headers, "host") - sort.Strings(headers) - - var buf bytes.Buffer - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - switch { - case k == "host": - buf.WriteString(r.req.URL.Host) - fallthrough - default: - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(v) - } - buf.WriteByte('\n') - } - } - return buf.String() -} - -// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names -func (r *request) getSignedHeaders() string { - var headers []string - for k := range r.req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - } - headers = append(headers, "host") - sort.Strings(headers) - return strings.Join(headers, ";") -} - -// getCanonicalRequest generate a canonical request of style -// -// canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// -// -func (r *request) getCanonicalRequest(hashedPayload string) string { - r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) - canonicalRequest := strings.Join([]string{ - r.req.Method, - getURLEncodedPath(r.req.URL.Path), - r.req.URL.RawQuery, - r.getCanonicalHeaders(), - r.getSignedHeaders(), - hashedPayload, - }, "\n") - return canonicalRequest -} - -// getStringToSign a string based on selected query values -func (r *request) getStringToSign(canonicalRequest string, t time.Time) string { - stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(r.config.Region, t) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) - return stringToSign -} - -// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html -func (r *request) PreSignV4() (string, 
error) { - if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" { - return "", errors.New("presign requires accesskey and secretkey") - } - r.SignV4() - return r.req.URL.String(), nil -} - -func (r *request) PostPresignSignature(policyBase64 string, t time.Time) string { - signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) - signature := getSignature(signingkey, policyBase64) - return signature -} - -// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -func (r *request) SignV4() { - query := r.req.URL.Query() - if r.expires != "" { - query.Set("X-Amz-Algorithm", authHeader) - } - t := time.Now().UTC() - // Add date if not present - if r.expires != "" { - query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - query.Set("X-Amz-Expires", r.expires) - } else { - r.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - } - - hashedPayload := r.getHashedPayload() - signedHeaders := r.getSignedHeaders() - if r.expires != "" { - query.Set("X-Amz-SignedHeaders", signedHeaders) - } - credential := getCredential(r.config.AccessKeyID, r.config.Region, t) - if r.expires != "" { - query.Set("X-Amz-Credential", credential) - r.req.URL.RawQuery = query.Encode() - } - canonicalRequest := r.getCanonicalRequest(hashedPayload) - stringToSign := r.getStringToSign(canonicalRequest, t) - signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) - signature := getSignature(signingKey, stringToSign) - - if r.expires != "" { - r.req.URL.RawQuery += "&X-Amz-Signature=" + signature - } else { - // final Authorization header - parts := []string{ - authHeader + " Credential=" + credential, - "SignedHeaders=" + signedHeaders, - "Signature=" + signature, - } - auth := strings.Join(parts, ", ") - r.Set("Authorization", auth) - } -} From d257dedf42014e5030cd99ffff1576ccfc082821 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 16:42:44 +0100 Subject: [PATCH 22/55] rename LocationParse -> Parse --- location/location.go | 10 +++++----- location/location_test.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/location/location.go b/location/location.go index a4b344000..1f3aba1a4 100644 --- a/location/location.go +++ b/location/location.go @@ -29,11 +29,11 @@ var parsers = []parser{ {"s3", s3.ParseConfig}, } -// ParseLocation parses a repository location from the string s. If s starts with a -// backend name followed by a colon, that backend's Parse() function is called. -// Otherwise, the local backend is used which interprets s as the name of a -// directory. -func ParseLocation(s string) (u Location, err error) { +// Parse extracts repository location information from the string s. If s +// starts with a backend name followed by a colon, that backend's Parse() +// function is called. Otherwise, the local backend is used which interprets s +// as the name of a directory. 
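(A hypothetical usage sketch of the renamed function, before its body below: the concrete Config types are those used by the switch added to cmd/restic/global.go later in this series, with the local backend's Config assumed to be the plain directory string.)

```go
package main

import (
	"log"

	"github.com/restic/restic/backend/s3"
	"github.com/restic/restic/location"
)

func main() {
	loc, err := location.Parse("s3://eu-central-1/bucketname")
	if err != nil {
		log.Fatal(err)
	}
	// Config is an interface{}; the scheme tells us which concrete type to expect.
	switch loc.Scheme {
	case "s3":
		cfg := loc.Config.(s3.Config)
		log.Printf("region=%q bucket=%q", cfg.Region, cfg.Bucket)
	case "local":
		log.Printf("directory=%q", loc.Config.(string))
	}
}
```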
+func Parse(s string) (u Location, err error) { scheme := extractScheme(s) u.Scheme = scheme diff --git a/location/location_test.go b/location/location_test.go index 096f0fa16..fedc66e85 100644 --- a/location/location_test.go +++ b/location/location_test.go @@ -64,9 +64,9 @@ var parseTests = []struct { }, } -func TestParseLocation(t *testing.T) { +func TestParse(t *testing.T) { for i, test := range parseTests { - u, err := ParseLocation(test.s) + u, err := Parse(test.s) if err != nil { t.Errorf("unexpected error: %v", err) continue From 7b1e8fdd061db1b9d3ead14eec3ae0944b8466e9 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:21:56 +0100 Subject: [PATCH 23/55] local: correct comment --- backend/local/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/local/local.go b/backend/local/local.go index ffcfe68d6..d8291a8bd 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -21,7 +21,7 @@ type Local struct { open map[string][]*os.File // Contains open files. Guarded by 'mu'. } -// Open opens the local backend at dir. +// Open opens the local backend as specified by config. func Open(dir string) (*Local, error) { items := []string{ dir, From f7c909197081f637a90da3899eb9dfac1b5bbcd1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:22:19 +0100 Subject: [PATCH 24/55] sftp: implement open with config --- backend/sftp/sftp.go | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index 4166bdb7f..f52c85c53 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -92,6 +92,23 @@ func Open(dir string, program string, args ...string) (*SFTP, error) { return sftp, nil } +func buildSSHCommand(cfg Config) []string { + args := []string{cfg.Host} + if cfg.User != "" { + args = append(args, "-l") + args = append(args, cfg.User) + } + args = append(args, "-s") + args = append(args, "sftp") + return args } + +// OpenWithConfig opens an sftp backend as described by the config by running +// "ssh" with the appropriate arguments. +func OpenWithConfig(cfg Config) (*SFTP, error) { + return Open(cfg.Dir, "ssh", buildSSHCommand(cfg)...) +} + // Create creates all the necessary files and directories for a new sftp // backend at dir. Afterwards a new config blob should be created. func Create(dir string, program string, args ...string) (*SFTP, error) { @@ -138,6 +155,12 @@ func Create(dir string, program string, args ...string) (*SFTP, error) { return Open(dir, program, args...) } +// CreateWithConfig creates an sftp backend as described by the config by running +// "ssh" with the appropriate arguments. +func CreateWithConfig(cfg Config) (*SFTP, error) { + return Create(cfg.Dir, "ssh", buildSSHCommand(cfg)...) +} + // Location returns this backend's location (the directory name).
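(As a sanity check on OpenWithConfig/CreateWithConfig above, a standalone sketch of the argv that buildSSHCommand assembles — the function is unexported in backend/sftp, so it is mirrored here with simplified parameters and illustrative values:)

```go
package main

import "fmt"

// Mirror of buildSSHCommand from backend/sftp above, shown only to
// illustrate the ssh arguments it produces.
func buildSSHCommand(host, user string) []string {
	args := []string{host}
	if user != "" {
		args = append(args, "-l", user)
	}
	return append(args, "-s", "sftp")
}

func main() {
	// Open/Create then effectively run: ssh example.com -l alice -s sftp
	fmt.Println(buildSSHCommand("example.com", "alice"))
}
```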
func (r *SFTP) Location() string { return r.p From 2b0b44c5cef43f062aafe226f938d884d189e20a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:23:02 +0100 Subject: [PATCH 25/55] s3: implement open with config --- backend/s3/config.go | 42 +++++++++++++++++++++++++------ backend/s3/config_test.go | 14 ++++++++--- backend/s3/s3.go | 52 ++++++++++++++------------------------- backend/s3_test.go | 8 +++++- 4 files changed, 72 insertions(+), 44 deletions(-) diff --git a/backend/s3/config.go b/backend/s3/config.go index 808a9464b..cd4d77b4f 100644 --- a/backend/s3/config.go +++ b/backend/s3/config.go @@ -2,13 +2,15 @@ package s3 import ( "errors" + "net/url" "strings" ) // Config contains all configuration necessary to connect to an s3 compatible // server. type Config struct { - Host string + Region string + URL string KeyID, Secret string Bucket string } @@ -26,15 +28,15 @@ func ParseConfig(s string) (interface{}, error) { } cfg := Config{ - Host: data[0], + Region: data[0], Bucket: data[1], } return cfg, nil } - data := strings.SplitN(s, ":", 3) - if len(data) != 3 { + data := strings.SplitN(s, ":", 2) + if len(data) != 2 { return nil, errors.New("s3: invalid format") } @@ -42,9 +44,35 @@ func ParseConfig(s string) (interface{}, error) { return nil, errors.New(`s3: config does not start with "s3"`) } - cfg := Config{ - Host: data[1], - Bucket: data[2], + s = data[1] + + cfg := Config{} + rest := strings.Split(s, "/") + if len(rest) < 2 { + return nil, errors.New("s3: region or bucket not found") + } + + if len(rest) == 2 { + // assume that just a region name and a bucket has been specified, in + // the format region/bucket + cfg.Region = rest[0] + cfg.Bucket = rest[1] + } else { + // assume that a URL has been specified, parse it and use the path as + // the bucket name. 
+ url, err := url.Parse(s) + if err != nil { + return nil, err + } + + if url.Path == "" { + return nil, errors.New("s3: bucket name not found") + } + + cfg.Bucket = url.Path[1:] + url.Path = "" + + cfg.URL = url.String() } return cfg, nil diff --git a/backend/s3/config_test.go b/backend/s3/config_test.go index 8821f9883..ca71a589f 100644 --- a/backend/s3/config_test.go +++ b/backend/s3/config_test.go @@ -7,11 +7,19 @@ var configTests = []struct { cfg Config }{ {"s3://eu-central-1/bucketname", Config{ - Host: "eu-central-1", + Region: "eu-central-1", Bucket: "bucketname", }}, - {"s3:hostname:foobar", Config{ - Host: "hostname", + {"s3:eu-central-1/foobar", Config{ + Region: "eu-central-1", + Bucket: "foobar", + }}, + {"s3:https://hostname:9999/foobar", Config{ + URL: "https://hostname:9999", + Bucket: "foobar", + }}, + {"s3:http://hostname:9999/foobar", Config{ + URL: "http://hostname:9999", Bucket: "foobar", }}, } diff --git a/backend/s3/s3.go b/backend/s3/s3.go index f98942a8e..0b831b6db 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "io" - "os" "strings" "github.com/minio/minio-go" @@ -29,45 +28,32 @@ type S3Backend struct { bucketname string } -func getConfig(region, bucket string) minio.Config { - config := minio.Config{ - AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"), - SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"), - Region: "us-east-1", - } - - if !strings.Contains(region, ".") { - // Amazon region name - switch region { - case "us-east-1": - config.Endpoint = "https://s3.amazonaws.com" - default: - config.Endpoint = "https://s3-" + region + ".amazonaws.com" - config.Region = region - } - } else { - // S3 compatible endpoint, use default region "us-east-1" - if strings.Contains(region, "localhost") || strings.Contains(region, "127.0.0.1") { - config.Endpoint = "http://" + region - } else { - config.Endpoint = "https://" + region - } - } - - return config -} - // Open opens the S3 backend at bucket and region. The bucket is created if it does not exist yet. 
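(Backing up to the URL branch of ParseConfig above: it leans on a small net/url trick — the path component supplies the bucket name, and clearing the path leaves the bare endpoint. A standalone sketch:)

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://hostname:9999/foobar")
	if err != nil {
		panic(err)
	}
	bucket := u.Path[1:] // strip the leading slash: "foobar"
	u.Path = ""          // drop the path so String() yields only the endpoint
	fmt.Println(bucket, u.String()) // foobar https://hostname:9999
}
```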
-func Open(regionname, bucketname string) (backend.Backend, error) { - s3api, err := minio.New(getConfig(regionname, bucketname)) +func Open(cfg Config) (backend.Backend, error) { + mcfg := minio.Config{ + AccessKeyID: cfg.KeyID, + SecretAccessKey: cfg.Secret, + } + + if cfg.URL != "" { + mcfg.Endpoint = cfg.URL + } else { + mcfg.Region = cfg.Region + } + + if mcfg.Region == "" { + mcfg.Region = "us-east-1" + } + + s3api, err := minio.New(mcfg) if err != nil { return nil, err } - be := &S3Backend{s3api: s3api, bucketname: bucketname} + be := &S3Backend{s3api: s3api, bucketname: cfg.Bucket} be.createConnections() - err = s3api.MakeBucket(bucketname, "") + err = s3api.MakeBucket(cfg.Bucket, "") if err != nil { return nil, err } diff --git a/backend/s3_test.go b/backend/s3_test.go index 611221085..b177ad067 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -1,6 +1,7 @@ package backend_test import ( + "os" "testing" "github.com/restic/restic/backend/s3" @@ -16,7 +17,12 @@ func TestS3Backend(t *testing.T) { t.Skip("s3 test server not available") } - be, err := s3.Open(TestS3Server, "restictestbucket") + be, err := s3.Open(s3.Config{ + URL: TestS3Server, + Bucket: "restictestbucket", + KeyID: os.Getenv("AWS_ACCESS_KEY_ID"), + Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), + }) OK(t, err) testBackend(be, t) From 7d5f8214cf51bac0174b4c421bf7a60852fc4aea Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:23:20 +0100 Subject: [PATCH 26/55] use new backend open with config --- cmd/restic/global.go | 100 +++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 60 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 7de028c3c..9fdf52367 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "net/url" "os" "strings" @@ -13,6 +12,7 @@ import ( "github.com/restic/restic/backend/local" "github.com/restic/restic/backend/s3" "github.com/restic/restic/backend/sftp" + "github.com/restic/restic/location" "github.com/restic/restic/repository" "golang.org/x/crypto/ssh/terminal" ) @@ -164,78 +164,58 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { return s, nil } -// Open the backend specified by URI. -// Valid formats are: -// * /foo/bar -> local repository at /foo/bar -// * s3://region/bucket -> amazon s3 bucket -// * sftp://user@host/foo/bar -> remote sftp repository on host for user at path foo/bar -// * sftp://host//tmp/backup -> remote sftp repository on host at path /tmp/backup -// * c:\temp -> local repository at c:\temp - the path must exist -func open(u string) (backend.Backend, error) { - // check if the url is a directory that exists - fi, err := os.Stat(u) - if err == nil && fi.IsDir() { - return local.Open(u) - } - - url, err := url.Parse(u) +// Open the backend specified by a location config. 
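(Before the global.go wiring below, a quick sketch of the new config-based entry point, mirroring the updated backend/s3_test.go above; the endpoint value is illustrative:)

```go
package main

import (
	"log"
	"os"

	"github.com/restic/restic/backend/s3"
)

func main() {
	be, err := s3.Open(s3.Config{
		URL:    "http://127.0.0.1:9000", // any S3-compatible endpoint
		Bucket: "restictestbucket",
		KeyID:  os.Getenv("AWS_ACCESS_KEY_ID"),
		Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = be // ready for use as a backend.Backend
}
```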
+func open(s string) (backend.Backend, error) { + loc, err := location.Parse(s) if err != nil { return nil, err } - if url.Scheme == "" { - return local.Open(url.Path) + switch loc.Scheme { + case "local": + return local.Open(loc.Config.(string)) + case "sftp": + return sftp.OpenWithConfig(loc.Config.(sftp.Config)) + case "s3": + cfg := loc.Config.(s3.Config) + if cfg.KeyID == "" { + cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID") + + } + if cfg.Secret == "" { + cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") + } + + return s3.Open(loc.Config.(s3.Config)) } - if len(url.Path) < 1 { - return nil, fmt.Errorf("unable to parse url %v", url) - } - - if url.Scheme == "s3" { - return s3.Open(url.Host, url.Path[1:]) - } - - args := []string{url.Host} - if url.User != nil && url.User.Username() != "" { - args = append(args, "-l") - args = append(args, url.User.Username()) - } - args = append(args, "-s") - args = append(args, "sftp") - return sftp.Open(url.Path[1:], "ssh", args...) + return nil, fmt.Errorf("invalid scheme %q", loc.Scheme) } // Create the backend specified by URI. -func create(u string) (backend.Backend, error) { - // check if the url is a directory that exists - fi, err := os.Stat(u) - if err == nil && fi.IsDir() { - return local.Create(u) - } - - url, err := url.Parse(u) +func create(s string) (backend.Backend, error) { + loc, err := location.Parse(s) if err != nil { return nil, err } - if url.Scheme == "" { - return local.Create(url.Path) + switch loc.Scheme { + case "local": + return local.Create(loc.Config.(string)) + case "sftp": + return sftp.CreateWithConfig(loc.Config.(sftp.Config)) + case "s3": + cfg := loc.Config.(s3.Config) + if cfg.KeyID == "" { + cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID") + + } + if cfg.Secret == "" { + cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") + } + + return s3.Open(loc.Config.(s3.Config)) } - if len(url.Path) < 1 { - return nil, fmt.Errorf("unable to parse url %v", url) - } - - if url.Scheme == "s3" { - return s3.Open(url.Host, url.Path[1:]) - } - - args := []string{url.Host} - if url.User != nil && url.User.Username() != "" { - args = append(args, "-l") - args = append(args, url.User.Username()) - } - args = append(args, "-s") - args = append(args, "sftp") - return sftp.Create(url.Path[1:], "ssh", args...) 
+ return nil, fmt.Errorf("invalid scheme %q", loc.Scheme) } From 1ad5c3813c4e3956d30f2046bb8460c93926608d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:23:34 +0100 Subject: [PATCH 27/55] correct CI s3 test server url --- run_integration_tests.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_integration_tests.go b/run_integration_tests.go index 62693b8da..700f1d032 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -256,7 +256,7 @@ var minioConfig = ` ` var minioEnv = map[string]string{ - "RESTIC_TEST_S3_SERVER": "127.0.0.1:9000", + "RESTIC_TEST_S3_SERVER": "http://127.0.0.1:9000", "AWS_ACCESS_KEY_ID": "KEBIYDZ87HCIH5D17YCN", "AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe", } From 2b10791df2b4fdf53e016e8b88ec5202fcbdd63f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:30:42 +0100 Subject: [PATCH 28/55] location: Fix test --- location/location_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/location/location_test.go b/location/location_test.go index fedc66e85..405ba0144 100644 --- a/location/location_test.go +++ b/location/location_test.go @@ -46,19 +46,19 @@ var parseTests = []struct { {"s3://eu-central-1/bucketname", Location{Scheme: "s3", Config: s3.Config{ - Host: "eu-central-1", + Region: "eu-central-1", Bucket: "bucketname", }}, }, {"s3://hostname.foo/bucketname", Location{Scheme: "s3", Config: s3.Config{ - Host: "hostname.foo", + Region: "hostname.foo", Bucket: "bucketname", }}, }, - {"s3:hostname.foo:repo", Location{Scheme: "s3", + {"s3:https://hostname.foo/repo", Location{Scheme: "s3", Config: s3.Config{ - Host: "hostname.foo", + URL: "https://hostname.foo", Bucket: "repo", }}, }, From 1922a4272c85983ac67e325827e4afbfe313a7b7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 18:55:15 +0100 Subject: [PATCH 29/55] s3: fix usage Ignore error response for existing bucket, add more debug code. --- backend/s3/s3.go | 13 ++++++++++++- cmd/restic/global.go | 15 +++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 0b831b6db..62616a993 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -9,6 +9,7 @@ import ( "github.com/minio/minio-go" "github.com/restic/restic/backend" + "github.com/restic/restic/debug" ) const maxKeysInList = 1000 @@ -28,7 +29,8 @@ type S3Backend struct { bucketname string } -// Open opens the S3 backend at bucket and region. The bucket is created if it does not exist yet. +// Open opens the S3 backend at bucket and region. The bucket is created if it +// does not exist yet. 
func Open(cfg Config) (backend.Backend, error) { mcfg := minio.Config{ AccessKeyID: cfg.KeyID, @@ -54,6 +56,15 @@ func Open(cfg Config) (backend.Backend, error) { be.createConnections() err = s3api.MakeBucket(cfg.Bucket, "") + + if err != nil { + e, ok := err.(minio.ErrorResponse) + if ok && e.Code == "BucketAlreadyExists" { + debug.Log("s3.Open", "ignoring error that bucket %q already exists", cfg.Bucket) + err = nil + } + } + if err != nil { return nil, err } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9fdf52367..2c73da530 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/backend/local" "github.com/restic/restic/backend/s3" "github.com/restic/restic/backend/sftp" + "github.com/restic/restic/debug" "github.com/restic/restic/location" "github.com/restic/restic/repository" "golang.org/x/crypto/ssh/terminal" @@ -166,6 +167,7 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { // Open the backend specified by a location config. func open(s string) (backend.Backend, error) { + debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { return nil, err @@ -173,8 +175,10 @@ func open(s string) (backend.Backend, error) { switch loc.Scheme { case "local": + debug.Log("open", "opening local repository at %#v", loc.Config) return local.Open(loc.Config.(string)) case "sftp": + debug.Log("open", "opening sftp repository at %#v", loc.Config) return sftp.OpenWithConfig(loc.Config.(sftp.Config)) case "s3": cfg := loc.Config.(s3.Config) @@ -186,14 +190,17 @@ func open(s string) (backend.Backend, error) { cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") } - return s3.Open(loc.Config.(s3.Config)) + debug.Log("open", "opening s3 repository at %#v", cfg) + return s3.Open(cfg) } + debug.Log("open", "invalid repository location: %v", s) return nil, fmt.Errorf("invalid scheme %q", loc.Scheme) } // Create the backend specified by URI. 
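(The environment-variable credential fallback shown in open() above reappears verbatim in create() below; hypothetically it could be consolidated into a helper like this — a sketch, not part of the patch:)

```go
package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/backend/s3"
)

// fillS3Credentials is a hypothetical helper: it fills in empty S3
// credentials from the conventional AWS environment variables, mirroring
// the inline fallback in open() and create().
func fillS3Credentials(cfg s3.Config) s3.Config {
	if cfg.KeyID == "" {
		cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")
	}
	if cfg.Secret == "" {
		cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
	}
	return cfg
}

func main() {
	cfg := fillS3Credentials(s3.Config{Region: "eu-central-1", Bucket: "restictestbucket"})
	fmt.Println(cfg.Bucket)
}
```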
func create(s string) (backend.Backend, error) { + debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { return nil, err @@ -201,8 +208,10 @@ func create(s string) (backend.Backend, error) { switch loc.Scheme { case "local": + debug.Log("open", "create local repository at %#v", loc.Config) return local.Create(loc.Config.(string)) case "sftp": + debug.Log("open", "create sftp repository at %#v", loc.Config) return sftp.CreateWithConfig(loc.Config.(sftp.Config)) case "s3": cfg := loc.Config.(s3.Config) @@ -214,8 +223,10 @@ func create(s string) (backend.Backend, error) { cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") } - return s3.Open(loc.Config.(s3.Config)) + debug.Log("open", "create s3 repository at %#v", loc.Config) + return s3.Open(cfg) } + debug.Log("open", "invalid repository scheme: %v", s) return nil, fmt.Errorf("invalid scheme %q", loc.Scheme) } From a17b6bbb648d365dd1b73d073cc8b58a58dd94be Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 28 Dec 2015 21:23:53 +0100 Subject: [PATCH 30/55] Update minio-go library --- Godeps/Godeps.json | 4 +- .../github.com/minio/minio-go/INSTALLGO.md | 83 + .../github.com/minio/minio-go/MAINTAINERS.md | 19 + .../src/github.com/minio/minio-go/README.md | 87 +- .../src/github.com/minio/minio-go/api-core.go | 906 ----------- .../minio/minio-go/api-definitions.go | 93 ++ .../minio/minio-go/api-error-response.go | 232 +++ .../minio/minio-go/api-fget-object.go | 102 ++ .../minio/minio-go/api-fput-object.go | 281 ++++ .../src/github.com/minio/minio-go/api-get.go | 379 +++++ .../src/github.com/minio/minio-go/api-list.go | 486 ++++++ .../minio/minio-go/api-multipart-core.go | 331 ---- .../minio/minio-go/api-presigned.go | 147 ++ .../minio/minio-go/api-put-bucket.go | 219 +++ .../minio/minio-go/api-put-object-partial.go | 197 +++ .../minio/minio-go/api-put-object.go | 559 +++++++ .../github.com/minio/minio-go/api-remove.go | 169 ++ .../{definitions.go => api-s3-definitions.go} | 13 +- .../src/github.com/minio/minio-go/api-stat.go | 113 ++ .../src/github.com/minio/minio-go/api.go | 1381 ++++------------- .../minio/minio-go/api_functional_test.go | 158 ++ .../minio/minio-go/api_handlers_test.go | 170 -- .../minio/minio-go/api_private_test.go | 195 ++- .../minio/minio-go/api_public_test.go | 287 ---- .../github.com/minio/minio-go/appveyor.yml | 3 - .../github.com/minio/minio-go/bucket-acl.go | 16 +- .../github.com/minio/minio-go/bucket-cache.go | 153 ++ .../src/github.com/minio/minio-go/chopper.go | 136 -- .../minio/minio-go/common-methods.go | 52 + .../src/github.com/minio/minio-go/common.go | 115 -- .../github.com/minio/minio-go/constants.go | 38 + .../src/github.com/minio/minio-go/errors.go | 168 -- .../minio-go/examples/play/bucketexists.go | 16 +- .../minio-go/examples/play/fgetobject.go | 44 + .../minio-go/examples/play/fputobject.go | 44 + .../minio-go/examples/play/getbucketacl.go | 15 +- .../minio/minio-go/examples/play/getobject.go | 25 +- .../examples/play/getobjectpartial.go | 91 ++ .../minio-go/examples/play/listbuckets.go | 22 +- .../examples/play/listincompleteuploads.go | 32 +- .../minio-go/examples/play/listobjects.go | 29 +- .../minio-go/examples/play/makebucket.go | 15 +- ...partialobject.go => presignedgetobject.go} | 27 +- .../examples/play/presignedpostpolicy.go | 56 + .../presignedputobject.go} | 29 +- .../minio/minio-go/examples/play/putobject.go | 25 +- .../examples/play/putobjectpartial.go | 56 + .../minio-go/examples/play/removebucket.go | 17 +- 
.../examples/play/removeincompleteupload.go | 15 +- .../minio-go/examples/play/removeobject.go | 16 +- .../minio-go/examples/play/setbucketacl.go | 15 +- .../minio-go/examples/play/statobject.go | 14 +- .../minio-go/examples/s3/bucketexists.go | 19 +- .../minio/minio-go/examples/s3/fgetobject.go | 45 + .../minio/minio-go/examples/s3/fputobject.go | 45 + .../minio-go/examples/s3/getbucketacl.go | 18 +- .../minio/minio-go/examples/s3/getobject.go | 28 +- .../minio-go/examples/s3/getobjectpartial.go | 92 ++ .../minio/minio-go/examples/s3/listbuckets.go | 27 +- .../examples/s3/listincompleteuploads.go | 33 +- .../minio/minio-go/examples/s3/listobjects.go | 36 +- .../minio/minio-go/examples/s3/makebucket.go | 18 +- .../examples/s3/presignedgetobject.go | 20 +- .../examples/s3/presignedpostpolicy.go | 27 +- .../examples/s3/presignedputobject.go | 20 +- .../minio/minio-go/examples/s3/putobject.go | 28 +- .../minio-go/examples/s3/putobjectpartial.go | 57 + .../minio-go/examples/s3/removebucket.go | 19 +- .../examples/s3/removeincompleteupload.go | 18 +- .../minio-go/examples/s3/removeobject.go | 17 +- .../minio-go/examples/s3/setbucketacl.go | 18 +- .../minio/minio-go/examples/s3/statobject.go | 17 +- .../src/github.com/minio/minio-go/io.go | 67 - .../github.com/minio/minio-go/post-policy.go | 138 +- .../minio/minio-go/request-common.go | 283 ---- ...{request-v2.go => request-signature-v2.go} | 193 ++- .../minio/minio-go/request-signature-v4.go | 282 ++++ .../github.com/minio/minio-go/request-v4.go | 228 --- .../minio/minio-go/signature-type.go | 21 + .../src/github.com/minio/minio-go/tempfile.go | 76 + .../src/github.com/minio/minio-go/utils.go | 319 ++++ 81 files changed, 5828 insertions(+), 4276 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-core.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-get.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-list.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go rename Godeps/_workspace/src/github.com/minio/minio-go/{definitions.go => api-s3-definitions.go} (92%) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go delete mode 100644 
Godeps/_workspace/src/github.com/minio/minio-go/chopper.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/common.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/constants.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/errors.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go rename Godeps/_workspace/src/github.com/minio/minio-go/examples/play/{getpartialobject.go => presignedgetobject.go} (56%) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go rename Godeps/_workspace/src/github.com/minio/minio-go/examples/{s3/getpartialobject.go => play/presignedputobject.go} (56%) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/io.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-common.go rename Godeps/_workspace/src/github.com/minio/minio-go/{request-v2.go => request-signature-v2.go} (52%) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/utils.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ec964d080..3cb12b7a8 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-62-g61f6570", - "Rev": "61f6570da0edd761974216c9ed5da485d3cc0c99" + "Comment": "v0.2.5-177-g691a38d", + "Rev": "691a38d161d6dfc0e8e78dc5360bc39f48a8626d" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md b/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md new file mode 100644 index 000000000..c3762bbfc --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md @@ -0,0 +1,83 @@ +## Ubuntu (Kylin) 14.04 +### Build Dependencies +This installation guide is based on Ubuntu 14.04+ on x86-64 platform. + +##### Install Git, GCC +```sh +$ sudo apt-get install git build-essential +``` + +##### Install Go 1.5+ + +Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/). + +```sh +$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz +$ mkdir -p ${HOME}/bin/ +$ mkdir -p ${HOME}/go/ +$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz +``` +##### Setup GOROOT and GOPATH + +Add the following exports to your ``~/.bashrc``. 
Environment variable GOROOT specifies the location of your golang binaries
+and GOPATH specifies the location of your project workspace.
+
+```sh
+export GOROOT=${HOME}/bin/go
+export GOPATH=${HOME}/go
+export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
+```
+```sh
+$ source ~/.bashrc
+```
+
+##### Testing it all
+```sh
+$ go env
+```
+
+## OS X (Yosemite) 10.10
+### Build Dependencies
+This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
+
+##### Install brew
+```sh
+$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+```
+
+##### Install Git, Python
+```sh
+$ brew install git python
+```
+
+##### Install Go 1.5+
+
+Install golang binaries using `brew`.
+
+```sh
+$ brew install go
+$ mkdir -p $HOME/go
+```
+
+##### Setup GOROOT and GOPATH
+
+Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries
+and GOPATH specifies the location of your project workspace.
+
+```sh
+export GOPATH=${HOME}/go
+export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
+export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
+export PATH=$PATH:${GOPATH}/bin
+```
+
+##### Source the new environment
+
+```sh
+$ source ~/.bash_profile
+```
+
+##### Testing it all
+```sh
+$ go env
+```
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md b/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100644
index 000000000..6dbef6265
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,19 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+
+Edit the `libraryVersion` constant in `api.go`.
+
+```
+$ grep libraryVersion api.go
+	libraryVersion = "0.3.0"
+```
+
+```
+$ git tag 0.3.0
+$ git push --tags
+```
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/README.md b/Godeps/_workspace/src/github.com/minio/minio-go/README.md
index bda9123a5..5417d8f14 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/README.md
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/README.md
@@ -1,12 +1,35 @@
 # Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+## Description
+
+Minio Go library is a simple client library for S3 compatible cloud storage servers. It supports AWS Signature Version 4 and Version 2; Signature Version 4 is the default.
+
+List of supported cloud storage providers.
+
+ - AWS Signature Version 4
+   - Amazon S3
+   - Minio
+
+ - AWS Signature Version 2
+   - Google Cloud Storage (Compatibility Mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
 ## Install
+If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
+
 ```sh
 $ go get github.com/minio/minio-go
 ```
+
 ## Example
+### ListBuckets()
+
+This example shows how to list your buckets.
+
 ```go
 package main
@@ -17,47 +40,51 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for bucket := range s3Client.ListBuckets() {
-		if bucket.Err != nil {
-			log.Fatalln(bucket.Err)
-		}
-		log.Println(bucket.Stat)
+	buckets, err := s3Client.ListBuckets()
+	if err != nil {
+		log.Fatalln(err)
+	}
+	for _, bucket := range buckets {
+		log.Println(bucket)
 	}
 }
 ```
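Because New() keys everything off the endpoint string, pointing the same program at a non-AWS, S3-compatible server is a one-line change. A minimal sketch; the endpoint address and credentials below are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Hypothetical self-hosted S3-compatible server; per the README above,
	// the signature version (v2 or v4) is inferred from the endpoint.
	s3Client, err := minio.New("objects.example.com:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	buckets, err := s3Client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	for _, bucket := range buckets {
		log.Println(bucket)
	}
}
```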
 
 ## Documentation
 
-### Bucket Level
-* [MakeBucket(bucket, acl) error](examples/s3/makebucket.go)
-* [BucketExists(bucket) error](examples/s3/bucketexists.go)
-* [RemoveBucket(bucket) error](examples/s3/removebucket.go)
-* [GetBucketACL(bucket) (BucketACL, error)](examples/s3/getbucketacl.go)
-* [SetBucketACL(bucket, BucketACL) error)](examples/s3/setbucketacl.go)
-* [ListBuckets() <-chan BucketStat](examples/s3/listbuckets.go)
-* [ListObjects(bucket, prefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go)
-* [ListIncompleteUploads(bucket, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
+### Bucket Operations.
+* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
+* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
+* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
+* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
+* [SetBucketACL(bucketName, BucketACL) error](examples/s3/setbucketacl.go)
+* [ListBuckets() []BucketStat](examples/s3/listbuckets.go)
+* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectStat](examples/s3/listobjects.go)
+* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
 
-### Object Level
-* [PutObject(bucket, object, size, io.Reader) error](examples/s3/putobject.go)
-* [GetObject(bucket, object) (io.Reader, ObjectStat, error)](examples/s3/getobject.go)
-* [GetPartialObject(bucket, object, offset, length) (io.Reader, ObjectStat, error)](examples/s3/getpartialobject.go)
-* [StatObject(bucket, object) (ObjectStat, error)](examples/s3/statobject.go)
-* [RemoveObject(bucket, object) error](examples/s3/removeobject.go)
-* [RemoveIncompleteUpload(bucket, object) <-chan error](examples/s3/removeincompleteupload.go)
+### Object Operations.
+* [PutObject(bucketName, objectName, io.Reader, size, contentType) error](examples/s3/putobject.go)
+* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectStat, error)](examples/s3/getobject.go)
+* [StatObject(bucketName, objectName) (ObjectStat, error)](examples/s3/statobject.go)
+* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
+* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
 
-### Presigned Bucket/Object Level
-* [PresignedGetObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
-* [PresignedPutObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedputobject.go)
+### File Object Operations.
+* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
+* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
+
+### Presigned Operations.
+* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go) +* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go) * [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go) ### API Reference diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go deleted file mode 100644 index 73fffbd29..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go +++ /dev/null @@ -1,906 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" -) - -const ( - separator = "/" -) - -// apiCore container to hold unexported internal functions -type apiCore struct { - config *Config -} - -// closeResp close non nil response with any response Body -func closeResp(resp *http.Response) { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } -} - -// putBucketRequest wrapper creates a new putBucket request -func (a apiCore) putBucketRequest(bucket, acl, location string) (*request, error) { - var r *request - var err error - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket, - } - var createBucketConfigBuffer *bytes.Reader - // If location is set use it and create proper bucket configuration - switch { - case location != "": - createBucketConfig := new(createBucketConfiguration) - createBucketConfig.Location = location - var createBucketConfigBytes []byte - switch { - case a.config.AcceptType == "application/xml": - createBucketConfigBytes, err = xml.Marshal(createBucketConfig) - case a.config.AcceptType == "application/json": - createBucketConfigBytes, err = json.Marshal(createBucketConfig) - default: - createBucketConfigBytes, err = xml.Marshal(createBucketConfig) - } - if err != nil { - return nil, err - } - createBucketConfigBuffer = bytes.NewReader(createBucketConfigBytes) - } - switch { - case createBucketConfigBuffer == nil: - r, err = newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - default: - r, err = newRequest(op, a.config, createBucketConfigBuffer) - if err != nil { - return nil, err - } - r.req.ContentLength = int64(createBucketConfigBuffer.Len()) - } - // by default bucket is private - switch { - case acl != "": - r.Set("x-amz-acl", acl) - default: - r.Set("x-amz-acl", "private") - } - - return r, nil -} - -/// Bucket Write Operations - -// putBucket create a new bucket -// -// Requires valid AWS Access Key ID to authenticate requests -// Anonymous requests are never allowed to create buckets -// -// optional arguments are acl and location - by default all buckets are created -// with ``private`` acl and location set to US 
Standard if one wishes to set -// different ACLs and Location one can set them properly. -// -// ACL valid values -// ------------------ -// private - owner gets full access [DEFAULT] -// public-read - owner gets full access, others get read access -// public-read-write - owner gets full access, others get full access too -// authenticated-read - owner gets full access, authenticated users get read access -// ------------------ -// -// Location valid values -// ------------------ -// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ] -// -// Default - US standard -func (a apiCore) putBucket(bucket, acl, location string) error { - req, err := a.putBucketRequest(bucket, acl, location) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - return nil -} - -// putBucketRequestACL wrapper creates a new putBucketACL request -func (a apiCore) putBucketACLRequest(bucket, acl string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + "?acl", - } - req, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - req.Set("x-amz-acl", acl) - return req, nil -} - -// putBucketACL set the permissions on an existing bucket using Canned ACL's -func (a apiCore) putBucketACL(bucket, acl string) error { - req, err := a.putBucketACLRequest(bucket, acl) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - return nil -} - -// getBucketACLRequest wrapper creates a new getBucketACL request -func (a apiCore) getBucketACLRequest(bucket string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + "?acl", - } - req, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - return req, nil -} - -// getBucketACL get the acl information on an existing bucket -func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) { - req, err := a.getBucketACLRequest(bucket) - if err != nil { - return accessControlPolicy{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return accessControlPolicy{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return accessControlPolicy{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - policy := accessControlPolicy{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &policy) - if err != nil { - return accessControlPolicy{}, err - } - // In-case of google private bucket policy doesn't have any Grant list - if a.config.Region == "google" { - return policy, nil - } - if policy.AccessControlList.Grant == nil { - errorResponse := ErrorResponse{ - Code: "InternalError", - Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.", - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - return accessControlPolicy{}, errorResponse - } - return policy, nil -} - -// getBucketLocationRequest wrapper creates a new getBucketLocation request -func (a apiCore) 
getBucketLocationRequest(bucket string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + "?location", - } - req, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - return req, nil -} - -// getBucketLocation uses location subresource to return a bucket's region -func (a apiCore) getBucketLocation(bucket string) (string, error) { - req, err := a.getBucketLocationRequest(bucket) - if err != nil { - return "", err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return "", err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - var locationConstraint string - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &locationConstraint) - if err != nil { - return "", err - } - return locationConstraint, nil -} - -// listObjectsRequest wrapper creates a new listObjects request -func (a apiCore) listObjectsRequest(bucket, marker, prefix, delimiter string, maxkeys int) (*request, error) { - // resourceQuery - get resources properly escaped and lined up before using them in http request - resourceQuery := func() (*string, error) { - switch { - case marker != "": - marker = fmt.Sprintf("&marker=%s", getURLEncodedPath(marker)) - fallthrough - case prefix != "": - prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix)) - fallthrough - case delimiter != "": - delimiter = fmt.Sprintf("&delimiter=%s", delimiter) - } - query := fmt.Sprintf("?max-keys=%d", maxkeys) + marker + prefix + delimiter - return &query, nil - } - query, err := resourceQuery() - if err != nil { - return nil, err - } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + *query, - } - r, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - return r, nil -} - -/// Bucket Read Operations - -// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request paramters :- -// --------- -// ?marker - Specifies the key to start with when listing objects in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. 
-func (a apiCore) listObjects(bucket, marker, prefix, delimiter string, maxkeys int) (listBucketResult, error) { - if err := invalidBucketError(bucket); err != nil { - return listBucketResult{}, err - } - req, err := a.listObjectsRequest(bucket, marker, prefix, delimiter, maxkeys) - if err != nil { - return listBucketResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return listBucketResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return listBucketResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - listBucketResult := listBucketResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listBucketResult) - if err != nil { - return listBucketResult, err - } - // close body while returning, along with any error - return listBucketResult, nil -} - -// headBucketRequest wrapper creates a new headBucket request -func (a apiCore) headBucketRequest(bucket string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "HEAD", - HTTPPath: separator + bucket, - } - return newRequest(op, a.config, nil) -} - -// headBucket useful to determine if a bucket exists and you have permission to access it. -func (a apiCore) headBucket(bucket string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - req, err := a.headBucketRequest(bucket) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - // Head has no response body, handle it - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - errorResponse = ErrorResponse{ - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - case http.StatusForbidden: - errorResponse = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - default: - errorResponse = ErrorResponse{ - Code: resp.Status, - Message: resp.Status, - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - return errorResponse - } - } - return nil -} - -// deleteBucketRequest wrapper creates a new deleteBucket request -func (a apiCore) deleteBucketRequest(bucket string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucket, - } - return newRequest(op, a.config, nil) -} - -// deleteBucket deletes the bucket named in the URI -// -// NOTE: - -// All objects (including all object versions and delete markers) -// in the bucket must be deleted before successfully attempting this request -func (a apiCore) deleteBucket(bucket string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - req, err := a.deleteBucketRequest(bucket) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - errorResponse = ErrorResponse{ - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - Resource: separator + bucket, - RequestID: 
resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - case http.StatusForbidden: - errorResponse = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - case http.StatusConflict: - errorResponse = ErrorResponse{ - Code: "Conflict", - Message: "Bucket not empty.", - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - default: - errorResponse = ErrorResponse{ - Code: resp.Status, - Message: resp.Status, - Resource: separator + bucket, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - return errorResponse - } - } - return nil -} - -/// Object Read/Write/Stat Operations - -func (a apiCore) putObjectUnAuthenticatedRequest(bucket, object, contentType string, size int64, body io.Reader) (*request, error) { - if strings.TrimSpace(contentType) == "" { - contentType = "application/octet-stream" - } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + separator + object, - } - r, err := newUnauthenticatedRequest(op, a.config, body) - if err != nil { - return nil, err - } - // Content-MD5 is not set consciously - r.Set("Content-Type", contentType) - r.req.ContentLength = size - return r, nil -} - -// putObjectUnAuthenticated - add an object to a bucket -// NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (a apiCore) putObjectUnAuthenticated(bucket, object, contentType string, size int64, body io.Reader) (ObjectStat, error) { - req, err := a.putObjectUnAuthenticatedRequest(bucket, object, contentType, size, body) - if err != nil { - return ObjectStat{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return ObjectStat{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - var metadata ObjectStat - metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes - return metadata, nil -} - -// putObjectRequest wrapper creates a new PutObject request -func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (*request, error) { - if strings.TrimSpace(contentType) == "" { - contentType = "application/octet-stream" - } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + separator + object, - } - r, err := newRequest(op, a.config, body) - if err != nil { - return nil, err - } - // set Content-MD5 as base64 encoded md5 - if md5SumBytes != nil { - r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) - } - r.Set("Content-Type", contentType) - r.req.ContentLength = size - return r, nil -} - -// putObject - add an object to a bucket -// NOTE: You must have WRITE permissions on a bucket to add an object to it. 
-func (a apiCore) putObject(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (ObjectStat, error) { - req, err := a.putObjectRequest(bucket, object, contentType, md5SumBytes, size, body) - if err != nil { - return ObjectStat{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return ObjectStat{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - var metadata ObjectStat - metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes - return metadata, nil -} - -func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string { - t := time.Now().UTC() - r := new(request) - r.config = a.config - if r.config.Signature.isV2() { - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - p.formData["AWSAccessKeyId"] = r.config.AccessKeyID - p.formData["signature"] = r.PostPresignSignatureV2(policyBase64) - return p.formData - } - credential := getCredential(r.config.AccessKeyID, r.config.Region, t) - p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)}) - p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader}) - p.addNewPolicy(policy{"eq", "$x-amz-credential", credential}) - - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - p.formData["x-amz-algorithm"] = authHeader - p.formData["x-amz-credential"] = credential - p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = r.PostPresignSignatureV4(policyBase64, t) - return p.formData -} - -func (a apiCore) presignedPutObject(bucket, object string, expires int64) (string, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + separator + object, - } - r, err := newPresignedRequest(op, a.config, expires) - if err != nil { - return "", err - } - if r.config.Signature.isV2() { - return r.PreSignV2() - } - return r.PreSignV4() -} - -func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offset, length int64) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + separator + object, - } - r, err := newPresignedRequest(op, a.config, expires) - if err != nil { - return nil, err - } - switch { - case length > 0 && offset > 0: - r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - case offset > 0 && length == 0: - r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - case length > 0 && offset == 0: - r.Set("Range", fmt.Sprintf("bytes=-%d", length)) - } - return r, nil -} - -func (a apiCore) presignedGetObject(bucket, object string, expires, offset, length int64) (string, error) { - if err := invalidArgumentError(object); err != nil { - return "", err - } - r, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length) - if err != nil { - return "", err - } - if r.config.Signature.isV2() { - return r.PreSignV2() - } - return r.PreSignV4() -} - -// getObjectRequest wrapper creates a new getObject request -func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + separator + object, - } - r, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - switch { - case length > 0 && offset >= 0: - r.Set("Range", fmt.Sprintf("bytes=%d-%d", 
offset, offset+length-1)) - case offset > 0 && length == 0: - r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - // The final length bytes - case length < 0 && offset == 0: - r.Set("Range", fmt.Sprintf("bytes=%d", length)) - } - return r, nil -} - -// getObject - retrieve object from Object Storage -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) { - if err := invalidArgumentError(object); err != nil { - return nil, ObjectStat{}, err - } - req, err := a.getObjectRequest(bucket, object, offset, length) - if err != nil { - return nil, ObjectStat{}, err - } - resp, err := req.Do() - if err != nil { - return nil, ObjectStat{}, err - } - if resp != nil { - switch resp.StatusCode { - case http.StatusOK: - case http.StatusPartialContent: - default: - return nil, ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - return nil, ObjectStat{}, ErrorResponse{ - Code: "InternalError", - Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - var objectstat ObjectStat - objectstat.ETag = md5sum - objectstat.Key = object - objectstat.Size = resp.ContentLength - objectstat.LastModified = date - objectstat.ContentType = contentType - - // do not close body here, caller will close - return resp.Body, objectstat, nil -} - -// deleteObjectRequest wrapper creates a new deleteObject request -func (a apiCore) deleteObjectRequest(bucket, object string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucket + separator + object, - } - return newRequest(op, a.config, nil) -} - -// deleteObject deletes a given object from a bucket -func (a apiCore) deleteObject(bucket, object string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - if err := invalidArgumentError(object); err != nil { - return err - } - req, err := a.deleteObjectRequest(bucket, object) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - errorResponse = ErrorResponse{ - Code: "NoSuchKey", - Message: "The specified key does not exist.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - case http.StatusForbidden: - errorResponse = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - default: - 
errorResponse = ErrorResponse{ - Code: resp.Status, - Message: resp.Status, - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - return errorResponse - } - } - return nil -} - -// headObjectRequest wrapper creates a new headObject request -func (a apiCore) headObjectRequest(bucket, object string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "HEAD", - HTTPPath: separator + bucket + separator + object, - } - return newRequest(op, a.config, nil) -} - -// headObject retrieves metadata from an object without returning the object itself -func (a apiCore) headObject(bucket, object string) (ObjectStat, error) { - if err := invalidBucketError(bucket); err != nil { - return ObjectStat{}, err - } - if err := invalidArgumentError(object); err != nil { - return ObjectStat{}, err - } - req, err := a.headObjectRequest(bucket, object) - if err != nil { - return ObjectStat{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return ObjectStat{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - errorResponse = ErrorResponse{ - Code: "NoSuchKey", - Message: "The specified key does not exist.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - case http.StatusForbidden: - errorResponse = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - default: - errorResponse = ErrorResponse{ - Code: resp.Status, - Message: resp.Status, - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - - } - return ObjectStat{}, errorResponse - } - } - md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - return ObjectStat{}, ErrorResponse{ - Code: "InternalError", - Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues.", - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - return ObjectStat{}, ErrorResponse{ - Code: "InternalError", - Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - var objectstat ObjectStat - objectstat.ETag = md5sum - objectstat.Key = object - objectstat.Size = size - objectstat.LastModified = date - objectstat.ContentType = contentType - return objectstat, nil -} - -/// Service Operations - -// listBucketRequest wrapper creates a new listBuckets request -func (a apiCore) listBucketsRequest() (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator, - } - return newRequest(op, a.config, nil) -} 
- -// listBuckets list of all buckets owned by the authenticated sender of the request -func (a apiCore) listBuckets() (listAllMyBucketsResult, error) { - req, err := a.listBucketsRequest() - if err != nil { - return listAllMyBucketsResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return listAllMyBucketsResult{}, err - } - if resp != nil { - // for un-authenticated requests, amazon sends a redirect handle it - if resp.StatusCode == http.StatusTemporaryRedirect { - return listAllMyBucketsResult{}, ErrorResponse{ - Code: "AccessDenied", - Message: "Anonymous access is forbidden for this operation.", - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - } - } - if resp.StatusCode != http.StatusOK { - return listAllMyBucketsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - listAllMyBucketsResult := listAllMyBucketsResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listAllMyBucketsResult) - if err != nil { - return listAllMyBucketsResult, err - } - return listAllMyBucketsResult, nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go new file mode 100644 index 000000000..7667645a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go @@ -0,0 +1,93 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io" + "time" +) + +// BucketStat container for bucket metadata. +type BucketStat struct { + // The name of the bucket. + Name string + // Date the bucket was created. + CreationDate time.Time +} + +// ObjectStat container for object metadata. +type ObjectStat struct { + ETag string + Key string + LastModified time.Time + Size int64 + ContentType string + + // Owner name. + Owner struct { + DisplayName string + ID string + } + + // The class of storage used to store the object. + StorageClass string + + // Error + Err error +} + +// ObjectMultipartStat container for multipart object metadata. +type ObjectMultipartStat struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} + +// partMetadata - container for each partMetadata. +type partMetadata struct { + MD5Sum []byte + Sha256Sum []byte + ReadCloser io.ReadCloser + Size int64 + Number int // partMetadata number. + + // Error + Err error +} + +// putObjectMetadata - container for each single PUT operation. 
+type putObjectMetadata struct {
+	MD5Sum      []byte
+	Sha256Sum   []byte
+	ReadCloser  io.ReadCloser
+	Size        int64
+	ContentType string
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go
new file mode 100644
index 000000000..0d2496507
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go
@@ -0,0 +1,232 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"strconv"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<Error>
+   <Code>AccessDenied</Code>
+   <Message>Access Denied</Message>
+   <BucketName>bucketName</BucketName>
+   <Key>objectName</Key>
+   <RequestId>F19772218238A85A</RequestId>
+   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse is the typed error returned by some API operations.
+type ErrorResponse struct {
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
+
+	// This is a new undocumented field, set only if available.
+	AmzBucketRegion string
+}
+
+// ToErrorResponse returns a parsed ErrorResponse struct; if the input is nil or not an ErrorResponse, the return value is a zero-value ErrorResponse.
+// This function is useful when someone wants to dig deeper into the error structures over the network.
+//
+// For example:
+//
+//	import s3 "github.com/minio/minio-go"
+//	...
+//	...
+//	reader, stat, err := s3.GetObject(...)
+//	if err != nil {
+//		resp := s3.ToErrorResponse(err)
+//		fmt.Println(resp.ToXML())
+//	}
+//	...
+func ToErrorResponse(err error) ErrorResponse {
+	switch err := err.(type) {
+	case ErrorResponse:
+		return err
+	default:
+		return ErrorResponse{}
+	}
+}
+
+// ToXML sends raw xml marshalled as string
+func (e ErrorResponse) ToXML() string {
+	b, err := xml.Marshal(&e)
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+// ToJSON sends raw json marshalled as string
+func (e ErrorResponse) ToJSON() string {
+	b, err := json.Marshal(&e)
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+// Error formats the HTTP error string
+func (e ErrorResponse) Error() string {
+	return e.Message
+}
+
+// Common reporting string
+const (
+	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// HTTPRespToErrorResponse returns a new encoded ErrorResponse structure
+func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+	if resp == nil {
+		msg := "Response is empty. 
" + reportIssue + return ErrInvalidArgument(msg) + } + var errorResponse ErrorResponse + err := xmlDecoder(resp.Body, &errorResponse) + if err != nil { + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errorResponse = ErrorResponse{ + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } else { + errorResponse = ErrorResponse{ + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + case http.StatusForbidden: + errorResponse = ErrorResponse{ + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + case http.StatusConflict: + errorResponse = ErrorResponse{ + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + default: + errorResponse = ErrorResponse{ + Code: resp.Status, + Message: resp.Status, + BucketName: bucketName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + } + return errorResponse +} + +// ErrEntityTooLarge input size is larger than supported maximum. +func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size '5GiB' for single PUT operation.", totalSize) + return ErrorResponse{ + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedShortRead unexpected shorter read of input buffer from target. +func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%s’ is shorter than the size ‘%s’ of input buffer.", + strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) + return ErrorResponse{ + Code: "UnexpectedShortRead", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedEOF unexpected end of file reached. +func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.", + strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) + return ErrorResponse{ + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrInvalidBucketName - invalid bucket name response. +func ErrInvalidBucketName(message string) error { + return ErrorResponse{ + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectName - invalid object name response. +func ErrInvalidObjectName(message string) error { + return ErrorResponse{ + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectPrefix - invalid object prefix response is +// similar to object name response. 
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
+// ErrInvalidArgument - invalid argument response.
+func ErrInvalidArgument(message string) error {
+	return ErrorResponse{
+		Code:      "InvalidArgument",
+		Message:   message,
+		RequestID: "minio",
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go
new file mode 100644
index 000000000..ee96a6cb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go
@@ -0,0 +1,102 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FGetObject - get object to a file.
+func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Verify if destination already exists.
+	st, err := os.Stat(filePath)
+	if err == nil {
+		// If the destination exists and is a directory.
+		if st.IsDir() {
+			return ErrInvalidArgument("filePath is a directory.")
+		}
+	}
+
+	// Proceed if file does not exist. Return for all other errors.
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	// Extract top level directory.
+	objectDir, _ := filepath.Split(filePath)
+	if objectDir != "" {
+		// Create any missing top level directories.
+		if err := os.MkdirAll(objectDir, 0700); err != nil {
+			return err
+		}
+	}
+
+	// Gather the md5sum (ETag) of the remote object.
+	objectStat, err := c.StatObject(bucketName, objectName)
+	if err != nil {
+		return err
+	}
+
+	// Write to a temporary file "<filePath><ETag>.part.minio" before saving.
+	filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+	// If exists, open in append mode. If not create it as a part file.
+	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	if err != nil {
+		return err
+	}
+
+	// Issue Stat to get the current offset.
+	st, err = filePart.Stat()
+	if err != nil {
+		return err
+	}
+
+	// Start the object reader at the current offset, so the download resumes there.
+	objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
+	if err != nil {
+		return err
+	}
+
+	// Write to the part file.
+	if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+		return err
+	}
+
+	// Close the file before rename, this is specifically needed for Windows users.
+	filePart.Close()
+
+	// Safely completed. Now commit by renaming to actual filename.
+	if err = os.Rename(filePartPath, filePath); err != nil {
+		return err
+	}
+
+	// Return.
+	return nil
+}
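A minimal usage sketch for the FGetObject call defined above; endpoint, credentials, bucket, and paths are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Data lands in "<filePath><ETag>.part.minio" first and is renamed into
	// place at the end, so a re-run resumes an interrupted download.
	if err := s3Client.FGetObject("my-bucket", "my-object", "/tmp/my-object"); err != nil {
		log.Fatalln(err)
	}
}
```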
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go
new file mode 100644
index 000000000..059710038
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go
@@ -0,0 +1,281 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+)
+
+// getUploadID returns an existing upload id for the object if one is found, or initiates a new multipart upload and returns its id.
+func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return "", err
+	}
+
+	// Set content Type to default if empty string.
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	// Find upload id for previous upload for an object.
+	uploadID, err := c.findUploadID(bucketName, objectName)
+	if err != nil {
+		return "", err
+	}
+	if uploadID == "" {
+		// Initiate multipart upload for an object.
+		initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+		if err != nil {
+			return "", err
+		}
+		// Save the new upload id.
+		uploadID = initMultipartUploadResult.UploadID
+	}
+	return uploadID, nil
+}
+
+// FPutObject - put the contents of a file as an object.
+func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (int64, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Open the referenced file.
+	fileData, err := os.Open(filePath)
+	// If any error fail quickly here.
+	if err != nil {
+		return 0, err
+	}
+	defer fileData.Close()
+
+	// Save the file stat.
+	fileStat, err := fileData.Stat()
+	if err != nil {
+		return 0, err
+	}
+
+	// Save the file size.
+	fileSize := fileStat.Size()
+	if fileSize > int64(maxMultipartPutObjectSize) {
+		return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.")
+	}
+
+	// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+	// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+	if isGoogleEndpoint(c.endpointURL) {
+		if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) {
+			return 0, ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
+				Key:        objectName,
+				BucketName: bucketName,
+			}
+		}
+		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+		n, err := c.putNoChecksum(bucketName, objectName, fileData, fileSize, contentType)
+		return n, err
+	}
+
+	// NOTE: S3 doesn't allow anonymous multipart requests.
+	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+		if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) {
+			return 0, ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
+				Key:        objectName,
+				BucketName: bucketName,
+			}
+		}
+		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+		n, err := c.putAnonymous(bucketName, objectName, fileData, fileSize, contentType)
+		return n, err
+	}
+
+	// The large-file (multipart) upload path is taken when the input size
+	// is greater than or equal to 5MiB, or when the size is negative (unknown).
+	if fileSize >= minimumPartSize || fileSize < 0 {
+		n, err := c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType)
+		return n, err
+	}
+	n, err := c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType)
+	return n, err
+}
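And the mirror image for uploads, a sketch of calling FPutObject; the same placeholder endpoint, credentials, and names apply:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Per the size checks above, files of 5MiB and larger take the
	// resumable multipart path; smaller files go up in a single PUT.
	n, err := s3Client.FPutObject("my-bucket", "my-object", "/tmp/my-file.tar.gz", "application/x-gzip")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```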
+// computeHash - calculates MD5 and Sha256 for an input ReadSeeker.
+func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
+	// MD5 and Sha256 hashers.
+	var hashMD5, hashSha256 hash.Hash
+	hashMD5 = md5.New()
+	hashWriter := io.MultiWriter(hashMD5)
+	if c.signature.isV4() {
+		hashSha256 = sha256.New()
+		hashWriter = io.MultiWriter(hashMD5, hashSha256)
+	}
+
+	size, err = io.Copy(hashWriter, reader)
+	if err != nil {
+		return nil, nil, 0, err
+	}
+
+	// Seek back reader to the beginning location.
+	if _, err := reader.Seek(0, 0); err != nil {
+		return nil, nil, 0, err
+	}
+
+	// Finalize md5sum and sha256 sum.
+	md5Sum = hashMD5.Sum(nil)
+	if c.signature.isV4() {
+		sha256Sum = hashSha256.Sum(nil)
+	}
+	return md5Sum, sha256Sum, size, nil
+}
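The single-pass, dual-digest trick in computeHash is worth seeing in isolation; a self-contained sketch of the same io.MultiWriter pattern:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	payload := strings.NewReader("example payload")
	md5Hash := md5.New()
	shaHash := sha256.New()
	// One read pass feeds both digests, as computeHash does for
	// signature v4 requests.
	if _, err := io.Copy(io.MultiWriter(md5Hash, shaHash), payload); err != nil {
		panic(err)
	}
	// Rewind before reusing the reader, mirroring the Seek(0, 0) above.
	if _, err := payload.Seek(0, 0); err != nil {
		panic(err)
	}
	fmt.Println("md5:", hex.EncodeToString(md5Hash.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(shaHash.Sum(nil)))
}
```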
+func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File, fileSize int64, contentType string) (int64, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// getUploadID for an object, initiates a new multipart request
+	// if it cannot find any previously partially uploaded object.
+	uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+	if err != nil {
+		return 0, err
+	}
+
+	// Total data read and written to server; should be equal to 'fileSize' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var completeMultipartUpload completeMultipartUpload
+
+	// Fetch previously uploaded parts and save the total size.
+	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return 0, err
+	}
+	// Previous maximum part size.
+	var prevMaxPartSize int64
+	// Loop through all parts and calculate totalUploadedSize.
+	for _, partInfo := range partsInfo {
+		totalUploadedSize += partInfo.Size
+		// Choose the maximum part size.
+		if partInfo.Size >= prevMaxPartSize {
+			prevMaxPartSize = partInfo.Size
+		}
+	}
+
+	// Calculate the optimal part size for a given file size.
+	partSize := optimalPartSize(fileSize)
+	// If prevMaxPartSize is set use that.
+	if prevMaxPartSize != 0 {
+		partSize = prevMaxPartSize
+	}
+
+	// Part number always starts with '1'.
+	partNumber := 1
+
+	// Loop through until EOF.
+	for totalUploadedSize < fileSize {
+		// Get a section reader on a particular offset.
+		sectionReader := io.NewSectionReader(fileData, totalUploadedSize, partSize)
+
+		// Calculate MD5 and Sha256 sums for the section reader.
+		md5Sum, sha256Sum, size, err := c.computeHash(sectionReader)
+		if err != nil {
+			return 0, err
+		}
+
+		// Save all the part metadata.
+		partMdata := partMetadata{
+			ReadCloser: ioutil.NopCloser(sectionReader),
+			Size:       size,
+			MD5Sum:     md5Sum,
+			Sha256Sum:  sha256Sum,
+			Number:     partNumber, // Part number to be uploaded.
+		}
+
+		// If part number already uploaded, move to the next one.
+		if isPartUploaded(objectPart{
+			ETag:       hex.EncodeToString(partMdata.MD5Sum),
+			PartNumber: partMdata.Number,
+		}, partsInfo) {
+			// Close the read closer.
+			partMdata.ReadCloser.Close()
+			continue
+		}
+
+		// Upload the part.
+		objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+		if err != nil {
+			partMdata.ReadCloser.Close()
+			return totalUploadedSize, err
+		}
+
+		// Save successfully uploaded size.
+		totalUploadedSize += partMdata.Size
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partMdata.Number] = objPart
+
+		// Increment to next part number.
+		partNumber++
+	}
+
+	// If totalUploadedSize differs from the file size, do not complete the request; return an error.
+	if totalUploadedSize != fileSize {
+		return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
+	}
+
+	// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
+	for _, part := range partsInfo {
+		var complPart completePart
+		complPart.ETag = part.ETag
+		complPart.PartNumber = part.PartNumber
+		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(completeMultipartUpload.Parts))
+	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+	if err != nil {
+		return totalUploadedSize, err
+	}
+
+	// Return final size.
+	return totalUploadedSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
new file mode 100644
index 000000000..b331fb44c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
@@ -0,0 +1,379 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+)
+
+// GetBucketACL gets the permissions on an existing bucket.
+//
+// Returned values are:
+//
+//	private - owner gets full access.
+//	public-read - owner gets full access, others get read access.
+//	public-read-write - owner gets full access, others get full access too.
+//	authenticated-read - owner gets full access, authenticated users get read access.
+func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+
+	// Set acl query.
+	urlValues := make(url.Values)
+	urlValues.Set("acl", "")
+
+	// Instantiate a new request.
+	req, err := c.newRequest("GET", requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Initiate the request.
+	resp, err := c.httpClient.Do(req)
+	defer closeResponse(resp)
+	if err != nil {
+		return "", err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return "", HTTPRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Decode access control policy.
+	policy := accessControlPolicy{}
+	err = xmlDecoder(resp.Body, &policy)
+	if err != nil {
+		return "", err
+	}
+
+	// Skip the following de-serialization check for Google Cloud Storage:
+	// on Google Cloud Storage a "private" canned ACL policy carries no
+	// grant list. Treat that as a valid case and check all other vendors.
+	if !isGoogleEndpoint(c.endpointURL) {
+		if policy.AccessControlList.Grant == nil {
+			errorResponse := ErrorResponse{
+				Code:            "InternalError",
+				Message:         "Access control Grant list is empty. " + reportIssue,
+				BucketName:      bucketName,
+				RequestID:       resp.Header.Get("x-amz-request-id"),
+				HostID:          resp.Header.Get("x-amz-id-2"),
+				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+			}
+			return "", errorResponse
+		}
+	}
+
+	// Boolean flags to identify the matching canned ACL.
+	var publicRead, publicWrite bool
+
+	// Handle grants.
+	grants := policy.AccessControlList.Grant
+	for _, g := range grants {
+		if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
+			continue
+		}
+		if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+			return BucketACL("authenticated-read"), nil
+		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+			publicWrite = true
+		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+			publicRead = true
+		}
+	}
+
+	// Neither public write nor public read is enabled.
+	if !publicWrite && !publicRead {
+		return BucketACL("private"), nil
+	}
+	// Public read is enabled, public write is not.
+	if !publicWrite && publicRead {
+		return BucketACL("public-read"), nil
+	}
+	// Both public read and public write are enabled.
+	if publicRead && publicWrite {
+		return BucketACL("public-read-write"), nil
+	}
+
+	return "", ErrorResponse{
+		Code:       "NoSuchBucketPolicy",
+		Message:    "The specified bucket does not have a bucket policy.",
+		BucketName: bucketName,
+		RequestID:  "minio",
+	}
+}
+
+// GetObject gets object content from the specified bucket.
+// You may also look at GetObjectPartial.
+func (c Client) GetObject(bucketName, objectName string) (io.ReadCloser, ObjectStat, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return nil, ObjectStat{}, err
+	}
+	// Get the whole object as a stream; no seek or resume is supported for this.
+	return c.getObject(bucketName, objectName, 0, 0)
+}
+
+// ReadAtCloser readat closer interface.
+type ReadAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+// GetObjectPartial returns an io.ReaderAt-style reader for sparse reads.
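+//
+// A minimal, illustrative sketch (bucket/object names are placeholders
+// and client construction is elided, as in the other examples):
+//
+//   api := client.New(....)
+//   reader, stat, err := api.GetObjectPartial("mybucket", "myobject")
+//   if err != nil {
+//       fmt.Println(err)
+//       return
+//   }
+//   defer reader.Close()
+//
+//   // Read 1KiB starting at the middle of the object.
+//   buf := make([]byte, 1024)
+//   n, err := reader.ReadAt(buf, stat.Size/2)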
+func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, ObjectStat, error) { + if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectStat{}, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectStat{}, err + } + // Send an explicit stat to get the actual object size. + objectStat, err := c.StatObject(bucketName, objectName) + if err != nil { + return nil, ObjectStat{}, err + } + + // Create request channel. + reqCh := make(chan readAtRequest) + // Create response channel. + resCh := make(chan readAtResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + return + // Request message. + case req := <-reqCh: + // Get shortest length. + // NOTE: Last remaining bytes are usually smaller than + // req.Buffer size. Use that as the final length. + length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset)) + httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length)) + if err != nil { + resCh <- readAtResponse{ + Error: err, + } + return + } + size, err := httpReader.Read(req.Buffer) + resCh <- readAtResponse{ + Size: size, + Error: err, + } + } + } + }() + // Return the readerAt backed by routine. + return newObjectReadAtCloser(reqCh, resCh, doneCh, objectStat.Size), objectStat, nil +} + +// response message container to reply back for the request. +type readAtResponse struct { + Size int + Error error +} + +// request message container to communicate with internal go-routine. +type readAtRequest struct { + Buffer []byte // requested bytes. + Offset int64 // readAt offset. +} + +// objectReadAtCloser container for io.ReadAtCloser. +type objectReadAtCloser struct { + // mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- readAtRequest + resCh <-chan readAtResponse + doneCh chan<- struct{} + objectSize int64 + + // Previous error saved for future calls. + prevErr error +} + +// newObjectReadAtCloser implements a io.ReadSeeker for a HTTP stream. +func newObjectReadAtCloser(reqCh chan<- readAtRequest, resCh <-chan readAtResponse, doneCh chan<- struct{}, objectSize int64) *objectReadAtCloser { + return &objectReadAtCloser{ + mutex: new(sync.Mutex), + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + objectSize: objectSize, + } +} + +// ReadAt reads len(b) bytes from the File starting at byte offset off. +// It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). +// At end of file, that error is io.EOF. +func (r *objectReadAtCloser) ReadAt(p []byte, offset int64) (int, error) { + // Locking. + r.mutex.Lock() + defer r.mutex.Unlock() + + // prevErr is which was saved in previous operation. + if r.prevErr != nil { + return 0, r.prevErr + } + + // Send current information over control channel to indicate we are ready. + reqMsg := readAtRequest{} + + // Send the current offset and bytes requested. + reqMsg.Buffer = p + reqMsg.Offset = offset + + // Send read request over the control channel. + r.reqCh <- reqMsg + + // Get data over the response channel. + dataMsg := <-r.resCh + + // Save any error. 
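+	// An io.EOF from the backend is returned together with the byte
+	// count, matching the io.ReaderAt contract where the final read
+	// may return n > 0 along with err == io.EOF.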
+ r.prevErr = dataMsg.Error + if dataMsg.Error != nil { + if dataMsg.Error == io.EOF { + return dataMsg.Size, dataMsg.Error + } + return 0, dataMsg.Error + } + return dataMsg.Size, nil +} + +// Closer is the interface that wraps the basic Close method. +// +// The behavior of Close after the first call returns error for +// subsequent Close() calls. +func (r *objectReadAtCloser) Close() (err error) { + // Locking. + r.mutex.Lock() + defer r.mutex.Unlock() + + // prevErr is which was saved in previous operation. + if r.prevErr != nil { + return r.prevErr + } + + // Close successfully. + close(r.doneCh) + + // Save this for any subsequent frivolous reads. + errMsg := "objectReadAtCloser: is already closed. Bad file descriptor." + r.prevErr = errors.New(errMsg) + return +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectStat{}, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectStat{}, err + } + + customHeader := make(http.Header) + // Set ranges if length and offset are valid. + if length > 0 && offset >= 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else if offset > 0 && length == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length < 0 && offset == 0 { + customHeader.Set("Range", fmt.Sprintf("bytes=%d", length)) + } + + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + }) + if err != nil { + return nil, ObjectStat{}, err + } + // Execute the request. + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // trim off the odd double quotes. + md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") + // parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. 
" + reportIssue + return nil, ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + var objectStat ObjectStat + objectStat.ETag = md5sum + objectStat.Key = objectName + objectStat.Size = resp.ContentLength + objectStat.LastModified = date + objectStat.ContentType = contentType + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go new file mode 100644 index 000000000..180a28a9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go @@ -0,0 +1,486 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "net/url" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets() { +// fmt.Println(message) +// } +// +func (c Client) ListBuckets() ([]BucketStat, error) { + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{}) + if err != nil { + return nil, err + } + // Initiate the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, HTTPRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +// ListObjects - (List Objects) - List some objects or all recursively. +// +// ListObjects lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input paramters are just bucketName, objectPrefix and recursive. If you +// enable recursive as 'true' this function will return back all the +// objects in a given bucket name and object prefix. +// +// api := client.New(....) +// recursive := true +// for message := range api.ListObjects("mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } +// +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat { + // Allocate new list objects channel. 
+ objectStatCh := make(chan ObjectStat, 1000) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectStat{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectStat{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectStat) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectStat{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectStat{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +/// Bucket Read Operations. + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request paramters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) { + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + return listBucketResult{}, err + } + // Validate object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + return listBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + // Set object prefix. + urlValues.Set("prefix", urlEncodePath(objectPrefix)) + // Set object marker. + urlValues.Set("marker", urlEncodePath(objectMarker)) + // Set delimiter. + urlValues.Set("delimiter", delimiter) + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Initialize a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + if err != nil { + return listBucketResult{}, err + } + // Execute list buckets. 
+ resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return listBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listBucketResult{}, HTTPRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. + listBucketResult := listBucketResult{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + return listBucketResult, nil +} + +// ListIncompleteUploads - List incompletely uploaded multipart objects. +// +// ListIncompleteUploads lists all incompleted objects matching the +// objectPrefix from the specified bucket. If recursion is enabled +// it would list all subdirectories and all its contents. +// +// Your input paramters are just bucketName, objectPrefix and recursive. +// If you enable recursive as 'true' this function will return back all +// the multipart objects in a given bucket name. +// +// api := client.New(....) +// recursive := true +// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } +// +func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat { + // Turn on size aggregation of individual parts. + isAggregateSize := true + return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) +} + +// listIncompleteUploads lists all incomplete uploads. +func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat { + // Allocate channel for multipart uploads. + objectMultipartStatCh := make(chan ObjectMultipartStat, 1000) + // Delimiter is set to "/" by default. + delimiter := "/" + if recursive { + // If recursive do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartStat{ + Err: err, + } + return objectMultipartStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartStat{ + Err: err, + } + return objectMultipartStatCh + } + go func(objectMultipartStatCh chan<- ObjectMultipartStat) { + defer close(objectMultipartStatCh) + // object and upload ID marker for future requests. + var objectMarker string + var uploadIDMarker string + for { + // list all multipart uploads. + result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) + if err != nil { + objectMultipartStatCh <- ObjectMultipartStat{ + Err: err, + } + return + } + // Save objectMarker and uploadIDMarker for next request. + objectMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + // Send all multipart uploads. + for _, obj := range result.Uploads { + // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. + if aggregateSize { + // Get total multipart size. + obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) + if err != nil { + objectMultipartStatCh <- ObjectMultipartStat{ + Err: err, + } + } + } + select { + // Send individual uploads here. + case objectMultipartStatCh <- obj: + // If done channel return here. + case <-doneCh: + return + } + } + // Send all common prefixes if any. 
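+			// Each common prefix is relayed as a zero-size entry whose
+			// Key is the prefix itself, which is how "directories"
+			// appear in a delimited listing.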
+ // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectMultipartStat{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send delimited prefixes here. + case objectMultipartStatCh <- object: + // If done channel return here. + case <-doneCh: + return + } + } + // Listing ends if result not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectMultipartStatCh) + // return. + return objectMultipartStatCh +} + +// listMultipartUploads - (List Multipart Uploads). +// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request paramters. :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin. +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + urlValues.Set("key-marker", urlEncodePath(keyMarker)) + // Set upload id marker. + urlValues.Set("upload-id-marker", uploadIDMarker) + // Set prefix marker. + urlValues.Set("prefix", urlEncodePath(prefix)) + // Set delimiter. + urlValues.Set("delimiter", delimiter) + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + + // Instantiate a new request. + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + if err != nil { + return listMultipartUploadsResult{}, err + } + // Execute list multipart uploads request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return listMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listMultipartUploadsResult{}, HTTPRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. + listMultipartUploadsResult := listMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) { + // Part number marker for the next batch of request. + var nextPartNumberMarker int + partsInfo = make(map[int]objectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. + listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. 
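+		// NextPartNumberMarker tells the server where to resume the
+		// part listing once an upload has more than 1000 parts.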
+ nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name. +func (c Client) findUploadID(bucketName, objectName string) (string, error) { + // Make list incomplete uploads recursive. + isRecursive := true + // Turn off size aggregation of individual parts, in this request. + isAggregateSize := false + // NOTE: done Channel is set to 'nil, this will drain go routine until exhaustion. + for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, nil) { + if mpUpload.Err != nil { + return "", mpUpload.Err + } + // if object name found, return the upload id. + if objectName == mpUpload.Key { + return mpUpload.UploadID, nil + } + } + // No upload id was found, return success and empty upload id. + return "", nil +} + +// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. +func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) { + // Iterate over all parts and aggregate the size. + partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + for _, partInfo := range partsInfo { + size += partInfo.Size + } + return size, nil +} + +// listObjectPartsQuery (List Parts query) +// - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request paramters :- +// --------- +// ?part-number-marker - Specifies the part after which listing should begin. +func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + + req, err := c.newRequest("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + if err != nil { + return listObjectPartsResult{}, err + } + // Exectue list object parts. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return listObjectPartsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return listObjectPartsResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode list object parts XML. + listObjectPartsResult := listObjectPartsResult{} + err = xmlDecoder(resp.Body, &listObjectPartsResult) + if err != nil { + return listObjectPartsResult, err + } + return listObjectPartsResult, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go deleted file mode 100644 index 1236058cd..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/http" - "strconv" -) - -// listMultipartUploadsRequest wrapper creates a new listMultipartUploads request -func (a apiCore) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (*request, error) { - // resourceQuery - get resources properly escaped and lined up before using them in http request - resourceQuery := func() (string, error) { - switch { - case keymarker != "": - keymarker = fmt.Sprintf("&key-marker=%s", getURLEncodedPath(keymarker)) - fallthrough - case uploadIDMarker != "": - uploadIDMarker = fmt.Sprintf("&upload-id-marker=%s", uploadIDMarker) - fallthrough - case prefix != "": - prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix)) - fallthrough - case delimiter != "": - delimiter = fmt.Sprintf("&delimiter=%s", delimiter) - } - query := fmt.Sprintf("?uploads&max-uploads=%d", maxuploads) + keymarker + uploadIDMarker + prefix + delimiter - return query, nil - } - query, err := resourceQuery() - if err != nil { - return nil, err - } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + query, - } - r, err := newRequest(op, a.config, nil) - if err != nil { - return nil, err - } - return r, nil -} - -// listMultipartUploads - (List Multipart Uploads) - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. -// request paramters :- -// --------- -// ?key-marker - Specifies the multipart upload after which listing should begin -// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. 
-func (a apiCore) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (listMultipartUploadsResult, error) { - req, err := a.listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter, maxuploads) - if err != nil { - return listMultipartUploadsResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return listMultipartUploadsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return listMultipartUploadsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - listMultipartUploadsResult := listMultipartUploadsResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listMultipartUploadsResult) - if err != nil { - return listMultipartUploadsResult, err - } - // close body while returning, along with any error - return listMultipartUploadsResult, nil -} - -// initiateMultipartRequest wrapper creates a new initiateMultiPart request -func (a apiCore) initiateMultipartRequest(bucket, object string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "POST", - HTTPPath: separator + bucket + separator + object + "?uploads", - } - return newRequest(op, a.config, nil) -} - -// initiateMultipartUpload initiates a multipart upload and returns an upload ID -func (a apiCore) initiateMultipartUpload(bucket, object string) (initiateMultipartUploadResult, error) { - req, err := a.initiateMultipartRequest(bucket, object) - if err != nil { - return initiateMultipartUploadResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request -func (a apiCore) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "POST", - HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID, - } - var completeMultipartUploadBytes []byte - var err error - switch { - case a.config.AcceptType == "application/xml": - completeMultipartUploadBytes, err = xml.Marshal(complete) - case a.config.AcceptType == "application/json": - completeMultipartUploadBytes, err = json.Marshal(complete) - default: - completeMultipartUploadBytes, err = xml.Marshal(complete) - } - if err != nil { - return nil, err - } - completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) - r, err := newRequest(op, a.config, completeMultipartUploadBuffer) - if err != nil { - return nil, err - } - r.req.ContentLength = int64(completeMultipartUploadBuffer.Len()) - return r, nil -} - -// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts. 
-func (a apiCore) completeMultipartUpload(bucket, object, uploadID string, c completeMultipartUpload) (completeMultipartUploadResult, error) { - req, err := a.completeMultipartUploadRequest(bucket, object, uploadID, c) - if err != nil { - return completeMultipartUploadResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return completeMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return completeMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - completeMultipartUploadResult := completeMultipartUploadResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &completeMultipartUploadResult) - if err != nil { - return completeMultipartUploadResult, err - } - return completeMultipartUploadResult, nil -} - -// abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request -func (a apiCore) abortMultipartUploadRequest(bucket, object, uploadID string) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID, - } - return newRequest(op, a.config, nil) -} - -// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted -func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error { - req, err := a.abortMultipartUploadRequest(bucket, object, uploadID) - if err != nil { - return err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - // Abort has no response body, handle it - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - } - case http.StatusForbidden: - errorResponse = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - } - default: - errorResponse = ErrorResponse{ - Code: resp.Status, - Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.", - Resource: separator + bucket + separator + object, - RequestID: resp.Header.Get("x-amz-request-id"), - } - } - return errorResponse - } - } - return nil -} - -// listObjectPartsRequest wrapper creates a new ListObjectParts request -func (a apiCore) listObjectPartsRequest(bucket, object, uploadID string, partNumberMarker, maxParts int) (*request, error) { - // resourceQuery - get resources properly escaped and lined up before using them in http request - resourceQuery := func() string { - var partNumberMarkerStr string - switch { - case partNumberMarker != 0: - partNumberMarkerStr = fmt.Sprintf("&part-number-marker=%d", partNumberMarker) - } - return fmt.Sprintf("?uploadId=%s&max-parts=%d", uploadID, maxParts) + partNumberMarkerStr - } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + separator + object + resourceQuery(), - } - return newRequest(op, a.config, nil) -} - -// listObjectParts (List Parts) - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload -// -// You can use the request parameters as selection criteria to return a subset of 
the uploads in a bucket. -// request paramters :- -// --------- -// ?part-number-marker - Specifies the part after which listing should begin. -func (a apiCore) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { - req, err := a.listObjectPartsRequest(bucket, object, uploadID, partNumberMarker, maxParts) - if err != nil { - return listObjectPartsResult{}, err - } - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return listObjectPartsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return listObjectPartsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - listObjectPartsResult := listObjectPartsResult{} - err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listObjectPartsResult) - if err != nil { - return listObjectPartsResult, err - } - return listObjectPartsResult, nil -} - -// uploadPartRequest wrapper creates a new UploadPart request -func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (*request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + separator + object + "?partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + uploadID, - } - r, err := newRequest(op, a.config, body) - if err != nil { - return nil, err - } - // set Content-MD5 as base64 encoded md5 - if md5SumBytes != nil { - r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes)) - } - r.req.ContentLength = size - return r, nil -} - -// uploadPart uploads a part in a multipart upload. -func (a apiCore) uploadPart(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (completePart, error) { - req, err := a.uploadPartRequest(bucket, object, uploadID, md5SumBytes, partNumber, size, body) - if err != nil { - return completePart{}, err - } - cPart := completePart{} - cPart.PartNumber = partNumber - cPart.ETag = "\"" + hex.EncodeToString(md5SumBytes) + "\"" - - // initiate the request - resp, err := req.Do() - defer closeResp(resp) - if err != nil { - return completePart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return completePart{}, BodyToErrorResponse(resp.Body, a.config.AcceptType) - } - } - return cPart, nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go new file mode 100644 index 000000000..d46623631 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go @@ -0,0 +1,147 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "errors" + "time" +) + +// PresignedGetObject returns a presigned URL to access an object without credentials. +// Expires maximum is 7days - ie. 604800 and minimum is 1. 
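+//
+// A minimal, illustrative sketch (names and expiry are placeholders):
+//
+//   api := client.New(....)
+//   presignedURL, err := api.PresignedGetObject("mybucket", "myobject", time.Duration(1000)*time.Second)
+//   if err != nil {
+//       fmt.Println(err)
+//       return
+//   }
+//   fmt.Println(presignedURL)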
+func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return "", err + } + if err := isValidObjectName(objectName); err != nil { + return "", err + } + if err := isValidExpiry(expires); err != nil { + return "", err + } + + expireSeconds := int64(expires / time.Second) + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + req, err := c.newRequest("GET", requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + }) + if err != nil { + return "", err + } + return req.URL.String(), nil +} + +// PresignedPutObject returns a presigned URL to upload an object without credentials. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return "", err + } + if err := isValidObjectName(objectName); err != nil { + return "", err + } + if err := isValidExpiry(expires); err != nil { + return "", err + } + + expireSeconds := int64(expires / time.Second) + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + req, err := c.newRequest("PUT", requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + }) + if err != nil { + return "", err + } + return req.URL.String(), nil +} + +// PresignedPostPolicy returns POST form data to upload an object at a location. +func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { + // Validate input arguments. + if p.expiration.IsZero() { + return nil, errors.New("Expiration time must be specified") + } + if _, ok := p.formData["key"]; !ok { + return nil, errors.New("object key must be specified") + } + if _, ok := p.formData["bucket"]; !ok { + return nil, errors.New("bucket name must be specified") + } + + bucketName := p.formData["bucket"] + // Fetch the location. + location, err := c.getBucketLocation(bucketName) + if err != nil { + return nil, err + } + + // Keep time. + t := time.Now().UTC() + if c.signature.isV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + // For Google endpoint set this value to be 'GoogleAccessId'. + if isGoogleEndpoint(c.endpointURL) { + p.formData["GoogleAccessId"] = c.accessKeyID + } else { + // For all other endpoints set this value to be 'AWSAccessKeyId'. + p.formData["AWSAccessKeyId"] = c.accessKeyID + } + // Sign the policy. + p.formData["signature"] = PostPresignSignatureV2(policyBase64, c.secretAccessKey) + return p.formData, nil + } + + // Add date policy. + p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-date", + value: t.Format(iso8601DateFormat), + }) + // Add algorithm policy. + p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-algorithm", + value: signV4Algorithm, + }) + // Add a credential policy. + credential := getCredential(c.accessKeyID, location, t) + p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-credential", + value: credential, + }) + // get base64 encoded policy. + policyBase64 := p.base64() + // Fill in the form data. 
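+	// These five fields are what an HTML POST form must carry, along
+	// with the file itself, for AWS signature version '4' to validate
+	// the upload.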
+ p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + p.formData["x-amz-signature"] = PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + return p.formData, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go new file mode 100644 index 000000000..97f54f782 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go @@ -0,0 +1,219 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" +) + +/// Bucket operations + +// MakeBucket makes a new bucket. +// +// Optional arguments are acl and location - by default all buckets are created +// with ``private`` acl and in US Standard region. +// +// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// +// private - owner gets full access [default]. +// public-read - owner gets full access, all others get read access. +// public-read-write - owner gets full access, all others get full access too. +// authenticated-read - owner gets full access, authenticated users get read access. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error { + // Validate if request is made on anonymous requests. + if c.anonymous { + return ErrInvalidArgument("Make bucket cannot be issued with anonymous credentials.") + } + + // Validate the input arguments. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if !acl.isValidBucketACL() { + return ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // If location is empty, treat is a default region 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Instantiate the request. + req, err := c.makeBucketRequest(bucketName, acl, location) + if err != nil { + return err + } + + // Execute the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return HTTPRespToErrorResponse(resp, bucketName, "") + } + } + + // Save the location into cache on a succesful makeBucket response. + c.bucketLocCache.Set(bucketName, location) + + // Return. + return nil +} + +// makeBucketRequest constructs request for makeBucket. +func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) { + // Validate input arguments. 
+ if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if !acl.isValidBucketACL() { + return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // Set get bucket location always as path style. + targetURL := c.endpointURL + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support this. + if isVirtualHostSupported(c.endpointURL) { + targetURL.Host = bucketName + "/" + c.endpointURL.Host + targetURL.Path = "/" + } else { + // If not fall back to using path style. + targetURL.Path = "/" + bucketName + } + } + + // get a new HTTP request for the method. + req, err := http.NewRequest("PUT", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // by default bucket acl is set to private. + req.Header.Set("x-amz-acl", "private") + if acl != "" { + req.Header.Set("x-amz-acl", string(acl)) + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + } + + // If location is not 'us-east-1' create bucket location config. + if location != "us-east-1" && location != "" { + createBucketConfig := new(createBucketConfiguration) + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return nil, err + } + createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) + req.Body = ioutil.NopCloser(createBucketConfigBuffer) + req.ContentLength = int64(createBucketConfigBuffer.Len()) + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes()))) + } + } + + // Sign the request. + if c.signature.isV4() { + // Signature calculated for MakeBucket request should be for 'us-east-1', + // regardless of the bucket's location constraint. + req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + } else if c.signature.isV2() { + req = SignV2(*req, c.accessKeyID, c.secretAccessKey) + } + + // Return signed request. + return req, nil +} + +// SetBucketACL set the permissions on an existing bucket using access control lists (ACL). +// +// For example +// +// private - owner gets full access [default]. +// public-read - owner gets full access, all others get read access. +// public-read-write - owner gets full access, all others get full access too. +// authenticated-read - owner gets full access, authenticated users get read access. +func (c Client) SetBucketACL(bucketName string, acl BucketACL) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if !acl.isValidBucketACL() { + return ErrInvalidArgument("Unrecognized ACL " + acl.String()) + } + + // Set acl query. + urlValues := make(url.Values) + urlValues.Set("acl", "") + + // Add misc headers. + customHeader := make(http.Header) + + if acl != "" { + customHeader.Set("x-amz-acl", acl.String()) + } else { + customHeader.Set("x-amz-acl", "private") + } + + // Instantiate a new request. + req, err := c.newRequest("PUT", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + customHeader: customHeader, + }) + if err != nil { + return err + } + + // Initiate the request. 
+ resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // if error return. + if resp.StatusCode != http.StatusOK { + return HTTPRespToErrorResponse(resp, bucketName, "") + } + } + + // return + return nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go new file mode 100644 index 000000000..3b7a5b733 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go @@ -0,0 +1,197 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/md5" + "crypto/sha256" + "errors" + "hash" + "io" + "sort" +) + +// PutObjectPartial put object partial. +func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Cleanup any previously left stale files, as the function exits. + defer cleanupStaleTempfiles("multiparts$-putobject-partial") + + // getUploadID for an object, initiates a new multipart request + // if it cannot find any previously partially uploaded object. + uploadID, err := c.getUploadID(bucketName, objectName, contentType) + if err != nil { + return 0, err + } + + // total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var completeMultipartUpload completeMultipartUpload + + // Fetch previously upload parts and save the total size. + partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + + // Previous maximum part size + var prevMaxPartSize int64 + // previous part number. + var prevPartNumber int + // Loop through all parts and calculate totalUploadedSize. + for _, partInfo := range partsInfo { + totalUploadedSize += partInfo.Size + // Choose the maximum part size. + if partInfo.Size >= prevMaxPartSize { + prevMaxPartSize = partInfo.Size + } + // Save previous part number. + prevPartNumber = partInfo.PartNumber + } + + // Calculate the optimal part size for a given file size. + partSize := optimalPartSize(size) + // If prevMaxPartSize is set use that. + if prevMaxPartSize != 0 { + partSize = prevMaxPartSize + } + + // MD5 and Sha256 hasher. + var hashMD5, hashSha256 hash.Hash + + // Part number always starts with prevPartNumber + 1. i.e The next part number. + partNumber := prevPartNumber + 1 + + // Loop through until EOF. + for totalUploadedSize < size { + // Initialize a new temporary file. + tmpFile, err := newTempFile("multiparts$-putobject-partial") + if err != nil { + return 0, err + } + + // Create a hash multiwriter. 
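+		// The hashers are tee'd with the temporary file below, so the
+		// part body is checksummed while it is being buffered to disk.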
+ hashMD5 = md5.New() + hashWriter := io.MultiWriter(hashMD5) + if c.signature.isV4() { + hashSha256 = sha256.New() + hashWriter = io.MultiWriter(hashMD5, hashSha256) + } + writer := io.MultiWriter(tmpFile, hashWriter) + + // totalUploadedSize is the current readAtOffset. + readAtOffset := totalUploadedSize + + // Read until partSize. + var totalReadPartSize int64 + + // readAt defaults to reading at 5MiB buffer. + readAtBuffer := make([]byte, optimalReadAtBufferSize) + + // Loop through until partSize. + for totalReadPartSize < partSize { + readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset) + if rerr != nil { + if rerr != io.EOF { + return 0, rerr + } + } + writeSize, werr := writer.Write(readAtBuffer[:readAtSize]) + if werr != nil { + return 0, werr + } + if readAtSize != writeSize { + return 0, errors.New("Something really bad happened here. " + reportIssue) + } + readAtOffset += int64(writeSize) + totalReadPartSize += int64(writeSize) + if rerr == io.EOF { + break + } + } + + // Seek back to beginning of the temporary file. + if _, err := tmpFile.Seek(0, 0); err != nil { + return 0, err + } + + // Save all the part metadata. + partMdata := partMetadata{ + ReadCloser: tmpFile, + MD5Sum: hashMD5.Sum(nil), + Size: totalReadPartSize, + } + + // Signature version '4'. + if c.signature.isV4() { + partMdata.Sha256Sum = hashSha256.Sum(nil) + } + + // Current part number to be uploaded. + partMdata.Number = partNumber + + // execute upload part. + objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata) + if err != nil { + // Close the read closer. + partMdata.ReadCloser.Close() + return totalUploadedSize, err + } + + // Save successfully uploaded size. + totalUploadedSize += partMdata.Size + + // Save successfully uploaded part metadata. + partsInfo[partMdata.Number] = objPart + + // Move to next part. + partNumber++ + } + + // If size is greater than zero verify totalUploaded. + // if totalUploaded is different than the input 'size', do not complete the request throw an error. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + } + + // Sort all completed parts. + sort.Sort(completedParts(completeMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go new file mode 100644 index 000000000..a02df778a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go @@ -0,0 +1,559 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "bytes"
+    "crypto/md5"
+    "crypto/sha256"
+    "encoding/hex"
+    "encoding/xml"
+    "fmt"
+    "hash"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "sort"
+    "strconv"
+    "strings"
+)
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []completePart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+//  - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
+//  - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
+//  - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
+//    Maximum object size that can be uploaded through this operation will be 5TiB.
+//
+// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+//
+// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload, so we fall back to a single PUT operation.
+func (c Client) PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+    // Input validation.
+    if err := isValidBucketName(bucketName); err != nil {
+        return 0, err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return 0, err
+    }
+
+    // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+    // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+    if isGoogleEndpoint(c.endpointURL) {
+        if size <= -1 {
+            return 0, ErrorResponse{
+                Code:       "NotImplemented",
+                Message:    "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
+                Key:        objectName,
+                BucketName: bucketName,
+            }
+        }
+        // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+        return c.putNoChecksum(bucketName, objectName, data, size, contentType)
+    }
+
+    // NOTE: S3 doesn't allow anonymous multipart requests.
+    if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+        if size <= -1 || size > int64(maxSinglePutObjectSize) {
+            return 0, ErrorResponse{
+                Code:       "NotImplemented",
+                Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
+                Key:        objectName,
+                BucketName: bucketName,
+            }
+        }
+        // Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+        return c.putAnonymous(bucketName, objectName, data, size, contentType)
+    }
+
+    // A large file upload is initiated when the input data size
+    // is greater than 5MiB, or when the data size is negative.
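A minimal caller-side sketch of the dispatch just described, patterned on the bundled examples/s3 programs; the endpoint, credentials, bucket, object and file names below are placeholders:

    package main

    import (
        "log"
        "os"

        "github.com/minio/minio-go"
    )

    func main() {
        // Placeholder endpoint and credentials.
        s3Client, err := minio.NewV4("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
        if err != nil {
            log.Fatalln(err)
        }
        file, err := os.Open("my-testfile")
        if err != nil {
            log.Fatalln(err)
        }
        defer file.Close()
        fileStat, err := file.Stat()
        if err != nil {
            log.Fatalln(err)
        }
        // PutObject chooses single PUT or resumable multipart from fileStat.Size().
        n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, fileStat.Size(), "application/octet-stream")
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("uploaded", n, "bytes")
    }

The size-based dispatch itself follows: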
+    if size >= minimumPartSize || size < 0 {
+        return c.putLargeObject(bucketName, objectName, data, size, contentType)
+    }
+    return c.putSmallObject(bucketName, objectName, data, size, contentType)
+}
+
+// putNoChecksum is a special function used for Google Cloud Storage.
+// It is needed because Google's multipart API is not S3 compatible.
+func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+    // Input validation.
+    if err := isValidBucketName(bucketName); err != nil {
+        return 0, err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return 0, err
+    }
+    if size > maxPartSize {
+        return 0, ErrEntityTooLarge(size, bucketName, objectName)
+    }
+    // For anonymous requests, we will not calculate sha256 and md5sum.
+    putObjMetadata := putObjectMetadata{
+        MD5Sum:      nil,
+        Sha256Sum:   nil,
+        ReadCloser:  ioutil.NopCloser(data),
+        Size:        size,
+        ContentType: contentType,
+    }
+    // Execute put object.
+    if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
+        return 0, err
+    }
+    return size, nil
+}
+
+// putAnonymous is a special function for uploading content as an anonymous request.
+// This special function is necessary since Amazon S3 doesn't allow anonymous
+// multipart uploads.
+func (c Client) putAnonymous(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+    // Input validation.
+    if err := isValidBucketName(bucketName); err != nil {
+        return 0, err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return 0, err
+    }
+    return c.putNoChecksum(bucketName, objectName, data, size, contentType)
+}
+
+// putSmallObject uploads files smaller than 5MiB.
+func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+    // Input validation.
+    if err := isValidBucketName(bucketName); err != nil {
+        return 0, err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return 0, err
+    }
+    // Read input data fully into buffer.
+    dataBytes, err := ioutil.ReadAll(data)
+    if err != nil {
+        return 0, err
+    }
+    if int64(len(dataBytes)) != size {
+        return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName)
+    }
+    // Construct a new PUT object metadata.
+    putObjMetadata := putObjectMetadata{
+        MD5Sum:      sumMD5(dataBytes),
+        Sha256Sum:   sum256(dataBytes),
+        ReadCloser:  ioutil.NopCloser(bytes.NewReader(dataBytes)),
+        Size:        size,
+        ContentType: contentType,
+    }
+    // Single part use case, use putObject directly.
+    if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
+        return 0, err
+    }
+    return size, nil
+}
+
+// hashCopy - calculates MD5 and SHA256 sums for up to partSize bytes.
+func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
+    // MD5 and Sha256 hasher.
+    var hashMD5, hashSha256 hash.Hash
+    hashMD5 = md5.New()
+    hashWriter := io.MultiWriter(writer, hashMD5)
+    if c.signature.isV4() {
+        hashSha256 = sha256.New()
+        hashWriter = io.MultiWriter(writer, hashMD5, hashSha256)
+    }
+
+    // Copy up to partSize bytes from data into hashWriter.
+    size, err = io.CopyN(hashWriter, data, partSize)
+    if err != nil {
+        if err != io.EOF {
+            return nil, nil, 0, err
+        }
+    }
+
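The hashing arrangement above is worth seeing in isolation: io.MultiWriter fans a single read out to the part buffer and both hashers, so the source data is only pulled once. A self-contained sketch of the same idea, using only the standard library (the payload is made up):

    package main

    import (
        "bytes"
        "crypto/md5"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        src := strings.NewReader("example part payload")
        part := new(bytes.Buffer)

        md5Hasher := md5.New()
        sha256Hasher := sha256.New()
        // One write path feeds the buffered part and both checksums.
        w := io.MultiWriter(part, md5Hasher, sha256Hasher)
        if _, err := io.Copy(w, src); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("md5:   ", hex.EncodeToString(md5Hasher.Sum(nil)))
        fmt.Println("sha256:", hex.EncodeToString(sha256Hasher.Sum(nil)))
    }

In hashCopy the single pass lands in a temporary file, which is then rewound so the part can be re-read during the actual upload:

+    // Seek back to beginning of input.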
+    if _, err := writer.Seek(0, 0); err != nil {
+        return nil, nil, 0, err
+    }
+
+    // Finalize md5sum and sha256 sum.
+    md5Sum = hashMD5.Sum(nil)
+    if c.signature.isV4() {
+        sha256Sum = hashSha256.Sum(nil)
+    }
+    return md5Sum, sha256Sum, size, nil
+}
+
+// putLargeObject uploads files larger than 5MiB.
+func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+    // Input validation.
+    if err := isValidBucketName(bucketName); err != nil {
+        return 0, err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return 0, err
+    }
+
+    // Cleanup any previously left stale files, as the function exits.
+    defer cleanupStaleTempfiles("multiparts$-putobject")
+
+    // getUploadID for an object, initiates a new multipart request
+    // if it cannot find any previously partially uploaded object.
+    uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+    if err != nil {
+        return 0, err
+    }
+
+    // Total data read and written to server. Should be equal to 'size' at the end of the call.
+    var totalUploadedSize int64
+
+    // Complete multipart upload.
+    var completeMultipartUpload completeMultipartUpload
+
+    // Fetch previously uploaded parts and save the total size.
+    partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+    if err != nil {
+        return 0, err
+    }
+    // Previous maximum part size.
+    var prevMaxPartSize int64
+    // Loop through all parts and calculate totalUploadedSize.
+    for _, partInfo := range partsInfo {
+        totalUploadedSize += partInfo.Size
+        // Choose the maximum part size.
+        if partInfo.Size >= prevMaxPartSize {
+            prevMaxPartSize = partInfo.Size
+        }
+    }
+
+    // Calculate the optimal part size for a given size.
+    partSize := optimalPartSize(size)
+    // If prevMaxPartSize is set use that.
+    if prevMaxPartSize != 0 {
+        partSize = prevMaxPartSize
+    }
+
+    // Part number always starts with '1'.
+    partNumber := 1
+
+    // Loop through until EOF.
+    for {
+        // We have reached EOF, break out.
+        if totalUploadedSize == size {
+            break
+        }
+
+        // Initialize a new temporary file.
+        tmpFile, err := newTempFile("multiparts$-putobject")
+        if err != nil {
+            return 0, err
+        }
+
+        // Calculates MD5 and Sha256 sum while copying partSize bytes into tmpFile.
+        md5Sum, sha256Sum, size, err := c.hashCopy(tmpFile, data, partSize)
+        if err != nil {
+            if err != io.EOF {
+                return 0, err
+            }
+        }
+
+        // Save all the part metadata.
+        partMdata := partMetadata{
+            ReadCloser: tmpFile,
+            Size:       size,
+            MD5Sum:     md5Sum,
+            Sha256Sum:  sha256Sum,
+            Number:     partNumber, // Current part number to be uploaded.
+        }
+
+        // If part number already uploaded, move to the next one.
+        if isPartUploaded(objectPart{
+            ETag:       hex.EncodeToString(partMdata.MD5Sum),
+            PartNumber: partNumber,
+        }, partsInfo) {
+            // Close the read closer.
+            partMdata.ReadCloser.Close()
+            continue
+        }
+
+        // Execute upload part.
+        objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+        if err != nil {
+            // Close the read closer.
+            partMdata.ReadCloser.Close()
+            return totalUploadedSize, err
+        }
+
+        // Save successfully uploaded size.
+        totalUploadedSize += partMdata.Size
+
+        // Save successfully uploaded part metadata.
+        partsInfo[partMdata.Number] = objPart
+
+        // Move to next part.
+        partNumber++
+    }
+
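The resume logic above deserves a note: once parts already exist on the server, the part size is pinned to the largest uploaded part rather than recomputed, so that the byte offsets of the remaining parts line up with what was already sent. A sketch of just that arithmetic; objectPart here is a local stand-in for the library's internal part type, and the sizes are made up:

    package main

    import "fmt"

    // Stand-in for the library's internal part metadata.
    type objectPart struct {
        PartNumber int
        Size       int64
    }

    // resumeParams mirrors the accounting loop above: sum what was
    // uploaded, and keep the previous part size if any parts exist.
    func resumeParams(parts map[int]objectPart, optimal int64) (uploaded, partSize int64) {
        var prevMax int64
        for _, p := range parts {
            uploaded += p.Size
            if p.Size >= prevMax {
                prevMax = p.Size
            }
        }
        partSize = optimal
        if prevMax != 0 {
            partSize = prevMax
        }
        return uploaded, partSize
    }

    func main() {
        parts := map[int]objectPart{
            1: {PartNumber: 1, Size: 5 << 20},
            2: {PartNumber: 2, Size: 5 << 20},
        }
        uploaded, partSize := resumeParams(parts, 5<<20)
        fmt.Printf("resume at byte %d with %d byte parts\n", uploaded, partSize)
    }

With the loop finished, the total is checked before the upload is completed:

+    // If size is greater than zero, verify totalUploadedSize; if it differs
+    // from the input 'size', do not complete the request and return an error.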
+ if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + } + + // Sort all completed parts. + sort.Sort(completedParts(completeMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// putObject - add an object to a bucket. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return ObjectStat{}, err + } + if err := isValidObjectName(objectName); err != nil { + return ObjectStat{}, err + } + + if strings.TrimSpace(putObjMetadata.ContentType) == "" { + putObjMetadata.ContentType = "application/octet-stream" + } + + // Set headers. + customHeader := make(http.Header) + customHeader.Set("Content-Type", putObjMetadata.ContentType) + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: putObjMetadata.ReadCloser, + contentLength: putObjMetadata.Size, + contentSha256Bytes: putObjMetadata.Sha256Sum, + contentMD5Bytes: putObjMetadata.MD5Sum, + } + // Initiate new request. + req, err := c.newRequest("PUT", reqMetadata) + if err != nil { + return ObjectStat{}, err + } + // Execute the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + var metadata ObjectStat + // Trim off the odd double quotes from ETag. + metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") + // A success here means data was written to server successfully. + metadata.Size = putObjMetadata.Size + return metadata, nil +} + +// initiateMultipartUpload initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if contentType == "" { + contentType = "application/octet-stream" + } + + // set ContentType header. + customHeader := make(http.Header) + customHeader.Set("Content-Type", contentType) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return initiateMultipartUploadResult{}, err + } + // Execute the request. 
+ resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml initiate multipart. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart uploads a part in a multipart upload. +func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (objectPart, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return objectPart{}, err + } + if err := isValidObjectName(objectName); err != nil { + return objectPart{}, err + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(uploadingPart.Number)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: uploadingPart.ReadCloser, + contentLength: uploadingPart.Size, + contentSha256Bytes: uploadingPart.Sha256Sum, + contentMD5Bytes: uploadingPart.MD5Sum, + } + + // Instantiate a request. + req, err := c.newRequest("PUT", reqMetadata) + if err != nil { + return objectPart{}, err + } + // Execute the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return objectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := objectPart{} + objPart.PartNumber = uploadingPart.Number + objPart.ETag = resp.Header.Get("ETag") + return objPart, nil +} + +// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts. +func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return completeMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return completeMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Instantiate all the complete multipart buffer. + completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), + contentLength: int64(completeMultipartUploadBuffer.Len()), + contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Execute the request. 
+    resp, err := c.httpClient.Do(req)
+    defer closeResponse(resp)
+    if err != nil {
+        return completeMultipartUploadResult{}, err
+    }
+    if resp != nil {
+        if resp.StatusCode != http.StatusOK {
+            return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+        }
+    }
+    // If successful response, decode the body.
+    completeMultipartUploadResult := completeMultipartUploadResult{}
+    err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
+    if err != nil {
+        return completeMultipartUploadResult, err
+    }
+    return completeMultipartUploadResult, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go
new file mode 100644
index 000000000..0330c9538
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,169 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "net/http"
+    "net/url"
+)
+
+// RemoveBucket deletes the named bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before this request can succeed.
+func (c Client) RemoveBucket(bucketName string) error {
+    if err := isValidBucketName(bucketName); err != nil {
+        return err
+    }
+    req, err := c.newRequest("DELETE", requestMetadata{
+        bucketName: bucketName,
+    })
+    if err != nil {
+        return err
+    }
+    resp, err := c.httpClient.Do(req)
+    defer closeResponse(resp)
+    if err != nil {
+        return err
+    }
+    if resp != nil {
+        if resp.StatusCode != http.StatusNoContent {
+            return HTTPRespToErrorResponse(resp, bucketName, "")
+        }
+    }
+
+    // Remove the location from cache on a successful delete.
+    c.bucketLocCache.Delete(bucketName)
+
+    return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+    if err := isValidBucketName(bucketName); err != nil {
+        return err
+    }
+    if err := isValidObjectName(objectName); err != nil {
+        return err
+    }
+    req, err := c.newRequest("DELETE", requestMetadata{
+        bucketName: bucketName,
+        objectName: objectName,
+    })
+    if err != nil {
+        return err
+    }
+    resp, err := c.httpClient.Do(req)
+    defer closeResponse(resp)
+    if err != nil {
+        return err
+    }
+    // DeleteObject always responds with http '204' even for
+    // objects which do not exist. So no need to handle them
+    // specifically.
+    return nil
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
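The function below funnels a background find-and-abort through an error channel: closing the channel without sending signals success, and the two-value receive distinguishes the cases. The pattern on its own, with a made-up removeAsync stand-in:

    package main

    import (
        "errors"
        "fmt"
    )

    func removeAsync(fail bool) error {
        errorCh := make(chan error)
        go func() {
            defer close(errorCh)
            if fail {
                errorCh <- errors.New("abort failed")
                return
            }
            // Success: close without sending anything.
        }()
        // A closed, empty channel yields ok == false.
        if err, ok := <-errorCh; ok && err != nil {
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(removeAsync(false)) // <nil>
        fmt.Println(removeAsync(true))  // abort failed
    }

RemoveIncompleteUpload applies this pattern after the usual checks:

+    // Validate input arguments.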
+ if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + errorCh := make(chan error) + go func(errorCh chan<- error) { + defer close(errorCh) + // Find multipart upload id of the object. + uploadID, err := c.findUploadID(bucketName, objectName) + if err != nil { + errorCh <- err + return + } + if uploadID != "" { + // If uploadID is not an empty string, initiate the request. + err := c.abortMultipartUpload(bucketName, objectName, uploadID) + if err != nil { + errorCh <- err + return + } + return + } + }(errorCh) + err, ok := <-errorCh + if ok && err != nil { + return err + } + return nil +} + +// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted. +func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Instantiate a new DELETE request. + req, err := c.newRequest("DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + if err != nil { + return err + } + // execute the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for Abort and it cannot be converged. + errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + default: + return HTTPRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go similarity index 92% rename from Godeps/_workspace/src/github.com/minio/minio-go/definitions.go rename to Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go index a9a69db6b..16d87a70e 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go @@ -90,8 +90,8 @@ type initiator struct { DisplayName string } -// partMetadata container for particular part of an object -type partMetadata struct { +// objectPart container for particular part of an object +type objectPart struct { // Part number identifies the part. PartNumber int @@ -103,6 +103,9 @@ type partMetadata struct { // Size of the uploaded part data. Size int64 + + // Error + Err error } // listObjectPartsResult container for ListObjectParts response. @@ -121,7 +124,7 @@ type listObjectPartsResult struct { // Indicates whether the returned list of parts is truncated. 
IsTruncated bool - Parts []partMetadata `xml:"Part"` + ObjectParts []objectPart `xml:"Part"` EncodingType string } @@ -162,7 +165,9 @@ type createBucketConfiguration struct { Location string `xml:"LocationConstraint"` } +// grant container for the grantee and his or her permissions. type grant struct { + // grantee container for DisplayName and ID of the person being granted permissions. Grantee struct { ID string DisplayName string @@ -173,7 +178,9 @@ type grant struct { Permission string } +// accessControlPolicy contains the elements providing ACL permissions for a bucket. type accessControlPolicy struct { + // accessControlList container for ACL information. AccessControlList struct { Grant []grant } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go new file mode 100644 index 000000000..29bd83fd9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go @@ -0,0 +1,113 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +// BucketExists verify if bucket exists and you have permission to access it. +func (c Client) BucketExists(bucketName string) error { + if err := isValidBucketName(bucketName); err != nil { + return err + } + req, err := c.newRequest("HEAD", requestMetadata{ + bucketName: bucketName, + }) + if err != nil { + return err + } + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return HTTPRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// StatObject verifies if object exists and you have permission to access. +func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) { + if err := isValidBucketName(bucketName); err != nil { + return ObjectStat{}, err + } + if err := isValidObjectName(objectName); err != nil { + return ObjectStat{}, err + } + // Instantiate a new request. + req, err := c.newRequest("HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + }) + if err != nil { + return ObjectStat{}, err + } + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return ObjectStat{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + return ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: "Content-Length is invalid. 
" + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + return ObjectStat{}, ErrorResponse{ + Code: "InternalError", + Message: "Last-Modified time format is invalid. " + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + } + } + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + // Save object metadata info. + var objectStat ObjectStat + objectStat.ETag = md5sum + objectStat.Key = objectName + objectStat.Size = size + objectStat.LastModified = date + objectStat.ContentType = contentType + return objectStat, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go index 27ad4ca94..788a74d4d 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go @@ -17,1152 +17,331 @@ package minio import ( + "encoding/base64" "encoding/hex" - "errors" "io" "net/http" "net/url" - "path/filepath" "runtime" - "sort" - "strconv" - "strings" - "sync" "time" ) -// API - Cloud Storage API interface -type API interface { - // Bucket Read/Write/Stat operations - BucketAPI +// Client implements Amazon S3 compatible methods. +type Client struct { + /// Standard options. + accessKeyID string // AccessKeyID required for authorized requests. + secretAccessKey string // SecretAccessKey required for authorized requests. + signature SignatureType // Choose a signature type if necessary. + anonymous bool // Set to 'true' if Client has no access and secret keys. 
- // Object Read/Write/Stat operations - ObjectAPI - - // Presigned API - PresignedAPI -} - -// BucketAPI - bucket specific Read/Write/Stat interface -type BucketAPI interface { - MakeBucket(bucket string, cannedACL BucketACL) error - BucketExists(bucket string) error - RemoveBucket(bucket string) error - SetBucketACL(bucket string, cannedACL BucketACL) error - GetBucketACL(bucket string) (BucketACL, error) - - ListBuckets() <-chan BucketStatCh - ListObjects(bucket, prefix string, recursive bool) <-chan ObjectStatCh - ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh -} - -// ObjectAPI - object specific Read/Write/Stat interface -type ObjectAPI interface { - GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error) - GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) - PutObject(bucket, object, contentType string, size int64, data io.Reader) error - StatObject(bucket, object string) (ObjectStat, error) - RemoveObject(bucket, object string) error - - RemoveIncompleteUpload(bucket, object string) <-chan error -} - -// PresignedAPI - object specific for now -type PresignedAPI interface { - PresignedGetObject(bucket, object string, expires time.Duration) (string, error) - PresignedPutObject(bucket, object string, expires time.Duration) (string, error) - PresignedPostPolicy(*PostPolicy) (map[string]string, error) -} - -// BucketStatCh - bucket metadata over read channel -type BucketStatCh struct { - Stat BucketStat - Err error -} - -// ObjectStatCh - object metadata over read channel -type ObjectStatCh struct { - Stat ObjectStat - Err error -} - -// ObjectMultipartStatCh - multipart object metadata over read channel -type ObjectMultipartStatCh struct { - Stat ObjectMultipartStat - Err error -} - -// BucketStat container for bucket metadata -type BucketStat struct { - // The name of the bucket. - Name string - // Date the bucket was created. - CreationDate time.Time -} - -// ObjectStat container for object metadata -type ObjectStat struct { - ETag string - Key string - LastModified time.Time - Size int64 - ContentType string - - Owner struct { - DisplayName string - ID string + // User supplied. + appInfo struct { + appName string + appVersion string } + endpointURL *url.URL - // The class of storage used to store the object. - StorageClass string + // Needs allocation. + httpClient *http.Client + bucketLocCache *bucketLocationCache } -// ObjectMultipartStat container for multipart object metadata -type ObjectMultipartStat struct { - // Date and time at which the multipart upload was initiated. - Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Initiator initiator - Owner owner - - StorageClass string - - // Key of the object for which the multipart upload was initiated. - Key string - Size int64 - - // Upload ID that identifies the multipart upload. 
-	UploadID string `xml:"UploadId"`
-}
-
-// Regions s3 region map used by bucket location constraint
-var regions = map[string]string{
-	"s3-fips-us-gov-west-1.amazonaws.com": "us-gov-west-1",
-	"s3.amazonaws.com":                    "us-east-1",
-	"s3-external-1.amazonaws.com":         "us-east-1",
-	"s3-us-west-1.amazonaws.com":          "us-west-1",
-	"s3-us-west-2.amazonaws.com":          "us-west-2",
-	"s3-eu-west-1.amazonaws.com":          "eu-west-1",
-	"s3-eu-central-1.amazonaws.com":       "eu-central-1",
-	"s3-ap-southeast-1.amazonaws.com":     "ap-southeast-1",
-	"s3-ap-southeast-2.amazonaws.com":     "ap-southeast-2",
-	"s3-ap-northeast-1.amazonaws.com":     "ap-northeast-1",
-	"s3-sa-east-1.amazonaws.com":          "sa-east-1",
-	"s3.cn-north-1.amazonaws.com.cn":      "cn-north-1",
-
-	// Add google cloud storage as one of the regions
-	"storage.googleapis.com": "google",
-}
-
-// getRegion returns a region based on its endpoint mapping.
-func getRegion(host string) (region string) {
-	if _, ok := regions[host]; ok {
-		return regions[host]
-	}
-	// Region cannot be empty according to Amazon S3.
-	// So we address all the four quadrants of our galaxy.
-	return "milkyway"
-}
-
-// SignatureType is type of signature to be used for a request
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4
+// Global constants.
 const (
-	Latest SignatureType = iota
-	SignatureV4
-	SignatureV2
+	libraryName    = "minio-go"
+	libraryVersion = "0.2.5"
 )
 
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
-	return s == SignatureV2
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+//       Minio (OS; ARCH) LIB/VER APP/VER
+const (
+	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+	if err != nil {
+		return nil, err
+	}
+	// Set to use signature version '2'.
+	clnt.signature = SignatureV2
+	return clnt, nil
 }
 
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
-	return s == SignatureV4
+// NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+	if err != nil {
+		return nil, err
+	}
+	// Set to use signature version '4'.
+	clnt.signature = SignatureV4
+	return clnt, nil
 }
 
-// isLatest - is signature Latest?
-func (s SignatureType) isLatest() bool {
-	return s == Latest
+// New - instantiate a new minio Client; the signature version is selected automatically.
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+	if err != nil {
+		return nil, err
+	}
+	// Google cloud storage should be set to signature V2, force it if not.
+	if isGoogleEndpoint(clnt.endpointURL) {
+		clnt.signature = SignatureV2
+	}
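Which constructor to use depends on the backend: NewV2 for services that only implement the older signing scheme, NewV4 to force the current one, and New to let the library decide from the endpoint. A short sketch; the hostnames and keys are placeholders, and legacy.example.com stands in for any hypothetical v2-only backend:

    package main

    import (
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        // Amazon S3: New selects signature v4 automatically.
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
        if err != nil {
            log.Fatalln(err)
        }

        // A backend that only implements signature v2.
        legacyClient, err := minio.NewV2("legacy.example.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
        if err != nil {
            log.Fatalln(err)
        }
        _, _ = s3Client, legacyClient
    }

New then finishes its endpoint-specific defaults:

+	// If Amazon S3, set to signature v4.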
+	if isAmazonEndpoint(clnt.endpointURL) {
+		clnt.signature = SignatureV4
+	}
+	return clnt, nil
 }
 
-// Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.
-type Config struct {
-	// Standard options
-	AccessKeyID     string
-	SecretAccessKey string
-	Endpoint        string
-	Signature       SignatureType
+func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, insecure)
+	if err != nil {
+		return nil, err
+	}
 
-	// Advanced options
-	// Specify this to get server response in non XML style if server supports it
-	AcceptType string
-	// Optional field. If empty, region is determined automatically.
-	Region string
+	// instantiate new Client.
+	clnt := new(Client)
+	clnt.accessKeyID = accessKeyID
+	clnt.secretAccessKey = secretAccessKey
+	if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
+		clnt.anonymous = true
+	}
 
-	// Expert options
-	//
-	// Set this to override default transport ``http.DefaultTransport``
+	// Save endpoint URL, user agent for future uses.
+	clnt.endpointURL = endpointURL
+
+	// Instantiate http client and bucket location cache.
+	clnt.httpClient = &http.Client{}
+	clnt.bucketLocCache = newBucketLocationCache()
+
+	// Return.
+	return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+	// If app name and version is not set, we do not set a new user agent.
+	if appName != "" && appVersion != "" {
+		c.appInfo = struct {
+			appName    string
+			appVersion string
+		}{}
+		c.appInfo.appName = appName
+		c.appInfo.appVersion = appVersion
+	}
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+	// Set this to override default transport ``http.DefaultTransport``.
 	//
 	// This transport is usually needed for debugging OR to add your own
 	// custom TLS certificates on the client transport, for custom CA's and
-	// certs which are not part of standard certificate authority
+	// certs which are not part of standard certificate authority follow this
+	// example :-
 	//
-	// For example :-
+	//   tr := &http.Transport{
+	//           TLSClientConfig:    &tls.Config{RootCAs: pool},
+	//           DisableCompression: true,
+	//   }
+	//   api.SetTransport(tr)
 	//
-	//  tr := &http.Transport{
-	//          TLSClientConfig:    &tls.Config{RootCAs: pool},
-	//          DisableCompression: true,
-	//  }
-	//
-	Transport http.RoundTripper
-
-	// internal
-	// use SetUserAgent append to default, useful when minio-go is used with in your application
-	userAgent      string
-	isUserAgentSet bool // allow user agent's to be set only once
-	isVirtualStyle bool // set when virtual hostnames are on
-}
-
-// Global constants
-const (
-	LibraryName    = "minio-go"
-	LibraryVersion = "0.2.5"
-)
-
-// SetUserAgent - append to a default user agent
-func (c *Config) SetUserAgent(name string, version string, comments ...string) {
-	if c.isUserAgentSet {
-		// if user agent already set do not set it
-		return
-	}
-	// if no name and version is set we do not add new user agents
-	if name != "" && version != "" {
-		c.userAgent = c.userAgent + " " + name + "/" + version + " (" + strings.Join(comments, "; ") + ") "
-		c.isUserAgentSet = true
+	if c.httpClient != nil {
+		c.httpClient.Transport = customHTTPTransport
 	}
 }
 
-type api struct {
-	apiCore
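Both hooks are optional. A sketch of wiring them up, assuming the returned CloudStorageClient exposes them as the concrete *Client does; the endpoint and keys are placeholders and the CA pool is left empty for brevity:

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "log"
        "net/http"

        "github.com/minio/minio-go"
    )

    func main() {
        // Placeholder endpoint and credentials.
        s3Client, err := minio.New("play.minio.io:9002", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
        if err != nil {
            log.Fatalln(err)
        }
        // Appends "my-app/0.1.0" to the library's User-Agent string.
        s3Client.SetAppInfo("my-app", "0.1.0")

        // Trust a custom CA pool on the client transport.
        pool := x509.NewCertPool()
        s3Client.SetCustomTransport(&http.Transport{
            TLSClientConfig: &tls.Config{RootCAs: pool},
        })
    }

Every operation ultimately flows through a single request builder, whose inputs are collected in one container:

+// requestMetadata - is a container for all the values needed to make a request.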
+type requestMetadata struct { + // If set newRequest presigns the URL. + presignURL bool + + // User supplied. + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + expires int64 + + // Generated by our internal code. + contentBody io.ReadCloser + contentLength int64 + contentSha256Bytes []byte + contentMD5Bytes []byte } -// New - instantiate a new minio api client -func New(config Config) (API, error) { - if strings.TrimSpace(config.Region) == "" || len(config.Region) == 0 { - u, err := url.Parse(config.Endpoint) +func (c Client) newRequest(method string, metadata requestMetadata) (*http.Request, error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = "POST" + } + + // construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.queryValues) + if err != nil { + return nil, err + } + + // get a new HTTP request for the method. + req, err := http.NewRequest(method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Gather location only if bucketName is present. + location := "us-east-1" // Default all other requests to "us-east-1". + if metadata.bucketName != "" { + location, err = c.getBucketLocation(metadata.bucketName) if err != nil { - return api{}, err - } - match, _ := filepath.Match("*.s3*.amazonaws.com", u.Host) - if match { - config.isVirtualStyle = true - hostSplits := strings.SplitN(u.Host, ".", 2) - u.Host = hostSplits[1] - } - matchGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host) - if matchGoogle { - config.isVirtualStyle = true - hostSplits := strings.SplitN(u.Host, ".", 2) - u.Host = hostSplits[1] - } - config.Region = getRegion(u.Host) - if config.Region == "google" { - // Google cloud storage is signature V2 - config.Signature = SignatureV2 + return nil, err } } - config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH) - config.isUserAgentSet = false // default - return api{apiCore{&config}}, nil -} -// PresignedPostPolicy return POST form data that can be used for object upload -func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { - if p.expiration.IsZero() { - return nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, errors.New("bucket name must be specified") - } - return a.presignedPostPolicy(p), nil -} - -/// Object operations - -/// Expires maximum is 7days - ie. 
604800 and minimum is 1 - -// PresignedPutObject get a presigned URL to upload an object -func (a api) PresignedPutObject(bucket, object string, expires time.Duration) (string, error) { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 || expireSeconds > 604800 { - return "", invalidArgumentError("") - } - return a.presignedPutObject(bucket, object, expireSeconds) -} - -// PresignedGetObject get a presigned URL to retrieve an object for third party apps -func (a api) PresignedGetObject(bucket, object string, expires time.Duration) (string, error) { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 || expireSeconds > 604800 { - return "", invalidArgumentError("") - } - return a.presignedGetObject(bucket, object, expireSeconds, 0, 0) -} - -// GetObject retrieve object - -// Downloads full object with no ranges, if you need ranges use GetPartialObject -func (a api) GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error) { - if err := invalidBucketError(bucket); err != nil { - return nil, ObjectStat{}, err - } - if err := invalidObjectError(object); err != nil { - return nil, ObjectStat{}, err - } - // get object - return a.getObject(bucket, object, 0, 0) -} - -// GetPartialObject retrieve partial object -// -// Takes range arguments to download the specified range bytes of an object. -// Setting offset and length = 0 will download the full object. -// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (a api) GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) { - if err := invalidBucketError(bucket); err != nil { - return nil, ObjectStat{}, err - } - if err := invalidObjectError(object); err != nil { - return nil, ObjectStat{}, err - } - // get partial object - return a.getObject(bucket, object, offset, length) -} - -// completedParts is a wrapper to make parts sortable by their part number -// multi part completion requires list of multi parts to be sorted -type completedParts []completePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// minimumPartSize minimum part size per object after which PutObject behaves internally as multipart -var minimumPartSize int64 = 1024 * 1024 * 5 - -// maxParts - unexported right now -var maxParts = int64(10000) - -// maxPartSize - unexported right now -var maxPartSize int64 = 1024 * 1024 * 1024 * 5 - -// maxConcurrentQueue - max concurrent upload queue -var maxConcurrentQueue int64 = 4 - -// calculatePartSize - calculate the optimal part size for the given objectSize -// -// NOTE: Assumption here is that for any given object upload to a S3 compatible object -// storage it will have the following parameters as constants -// -// maxParts -// maximumPartSize -// minimumPartSize -// -// if a the partSize after division with maxParts is greater than minimumPartSize -// then choose that to be the new part size, if not return MinimumPartSize -// -// special case where it happens to be that partSize is indeed bigger than the -// maximum part size just return maxPartSize back -func calculatePartSize(objectSize int64) int64 { - // make sure last part has enough buffer and handle this poperly - partSize := (objectSize / (maxParts - 1)) - if partSize > minimumPartSize { - if partSize > maxPartSize { - return maxPartSize + // If 
presigned request, return quickly. + if metadata.expires != 0 { + if c.anonymous { + return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") } - return partSize + if c.signature.isV2() { + // Presign URL with signature v2. + req = PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) + } else { + // Presign URL with signature v4. + req = PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) + } + return req, nil } - return minimumPartSize + + // Set content body if available. + if metadata.contentBody != nil { + req.Body = metadata.contentBody + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // set incoming content-length. + if metadata.contentLength > 0 { + req.ContentLength = metadata.contentLength + } + + // Set sha256 sum only for non anonymous credentials. + if !c.anonymous { + // set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + if metadata.contentSha256Bytes != nil { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes)) + } + } + } + + // set md5Sum for content protection. + if metadata.contentMD5Bytes != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) + } + + // Sign the request if not anonymous. + if !c.anonymous { + if c.signature.isV2() { + // Add signature version '2' authorization header. + req = SignV2(*req, c.accessKeyID, c.secretAccessKey) + } else if c.signature.isV4() { + // Add signature version '4' authorization header. + req = SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + } + } + // return request. + return req, nil } -func (a api) newObjectUpload(bucket, object, contentType string, size int64, data io.Reader) error { - initMultipartUploadResult, err := a.initiateMultipartUpload(bucket, object) +func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) { + urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/" + // Make URL only if bucketName is available, otherwise use the endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support this. + if isVirtualHostSupported(c.endpointURL) { + urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/" + if objectName != "" { + urlStr = urlStr + urlEncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + if objectName != "" { + urlStr = urlStr + "/" + urlEncodePath(objectName) + } + } + } + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" 
+ queryValues.Encode() + } + u, err := url.Parse(urlStr) if err != nil { - return err + return nil, err } - uploadID := initMultipartUploadResult.UploadID - complMultipartUpload := completeMultipartUpload{} - var totalLength int64 - // Calculate optimal part size for a given size - partSize := calculatePartSize(size) - // Allocate bufferred error channel for maximum parts - errCh := make(chan error, maxParts) - // Limit multi part queue size to concurrent - mpQueueCh := make(chan struct{}, maxConcurrentQueue) - defer close(errCh) - defer close(mpQueueCh) - // Allocate a new wait group - wg := new(sync.WaitGroup) - - for p := range chopper(data, partSize, nil) { - // This check is primarily for last part - // This verifies if the part.Len was an unexpected read i.e if we lost few bytes - if p.Len < partSize && size > 0 { - expectedPartLen := size - totalLength - if expectedPartLen != p.Len { - return ErrorResponse{ - Code: "UnexpectedShortRead", - Message: "Data read ‘" + strconv.FormatInt(expectedPartLen, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(p.Len, 10) + "’", - Resource: separator + bucket + separator + object, - } - } - } - // Limit to 4 parts a given time - mpQueueCh <- struct{}{} - // Account for all parts uploaded simultaneousy - wg.Add(1) - go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) { - defer wg.Done() - defer func() { - <-mpQueueCh - }() - if p.Err != nil { - errCh <- p.Err - return - } - var complPart completePart - complPart, err = a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker) - if err != nil { - errCh <- err - return - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) - errCh <- nil - }(errCh, mpQueueCh, p) - totalLength += p.Len - } - wg.Wait() - if err := <-errCh; err != nil { - return err - } - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = a.completeMultipartUpload(bucket, object, uploadID, complMultipartUpload) - if err != nil { - return err - } - return nil + return u, nil } -type partCh struct { - Metadata partMetadata - Err error -} - -func (a api) listObjectPartsRecursive(bucket, object, uploadID string) <-chan partCh { - partCh := make(chan partCh, 1000) - go a.listObjectPartsRecursiveInRoutine(bucket, object, uploadID, partCh) - return partCh -} - -func (a api) listObjectPartsRecursiveInRoutine(bucket, object, uploadID string, ch chan partCh) { - defer close(ch) - listObjPartsResult, err := a.listObjectParts(bucket, object, uploadID, 0, 1000) - if err != nil { - ch <- partCh{ - Metadata: partMetadata{}, - Err: err, - } - return - } - for _, uploadedPart := range listObjPartsResult.Parts { - ch <- partCh{ - Metadata: uploadedPart, - Err: nil, - } - } - for { - if !listObjPartsResult.IsTruncated { - break - } - listObjPartsResult, err = a.listObjectParts(bucket, object, uploadID, listObjPartsResult.NextPartNumberMarker, 1000) - if err != nil { - ch <- partCh{ - Metadata: partMetadata{}, - Err: err, - } - return - } - for _, uploadedPart := range listObjPartsResult.Parts { - ch <- partCh{ - Metadata: uploadedPart, - Err: nil, - } - } - } -} - -func (a api) getMultipartSize(bucket, object, uploadID string) (int64, error) { - var size int64 - for part := range a.listObjectPartsRecursive(bucket, object, uploadID) { - if part.Err != nil { - return 0, part.Err - } - size += part.Metadata.Size - } - return size, nil -} - -func (a api) continueObjectUpload(bucket, object, uploadID string, size int64, data io.Reader) error { - var skipParts []skipPart - 
completeMultipartUpload := completeMultipartUpload{} - var totalLength int64 - for part := range a.listObjectPartsRecursive(bucket, object, uploadID) { - if part.Err != nil { - return part.Err - } - var completedPart completePart - completedPart.PartNumber = part.Metadata.PartNumber - completedPart.ETag = part.Metadata.ETag - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart) - md5SumBytes, err := hex.DecodeString(strings.Trim(part.Metadata.ETag, "\"")) // trim off the odd double quotes - if err != nil { - return err - } - totalLength += part.Metadata.Size - skipParts = append(skipParts, skipPart{ - md5sum: md5SumBytes, - partNumber: part.Metadata.PartNumber, - }) - } - - // Calculate the optimal part size for a given size - partSize := calculatePartSize(size) - // Allocate bufferred error channel for maximum parts - errCh := make(chan error, maxParts) - // Limit multipart queue size to concurrent - mpQueueCh := make(chan struct{}, maxConcurrentQueue) - defer close(errCh) - defer close(mpQueueCh) - // Allocate a new wait group - wg := new(sync.WaitGroup) - - for p := range chopper(data, partSize, skipParts) { - // This check is primarily for last part - // This verifies if the part.Len was an unexpected read i.e if we lost few bytes - if p.Len < partSize && size > 0 { - expectedPartLen := size - totalLength - if expectedPartLen != p.Len { - return ErrorResponse{ - Code: "UnexpectedShortRead", - Message: "Data read ‘" + strconv.FormatInt(expectedPartLen, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(p.Len, 10) + "’", - Resource: separator + bucket + separator + object, - } - } - } - // Limit to 4 parts a given time - mpQueueCh <- struct{}{} - // Account for all parts uploaded simultaneousy - wg.Add(1) - go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) { - defer wg.Done() - defer func() { - <-mpQueueCh - }() - if p.Err != nil { - errCh <- p.Err - return - } - completedPart, err := a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker) - if err != nil { - errCh <- err - return - } - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart) - errCh <- nil - }(errCh, mpQueueCh, p) - totalLength += p.Len - } - wg.Wait() - if err := <-errCh; err != nil { - return err - } - sort.Sort(completedParts(completeMultipartUpload.Parts)) - _, err := a.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload) - if err != nil { - return err - } - return nil -} - -// PutObject create an object in a bucket -// -// You must have WRITE permissions on a bucket to create an object -// -// This version of PutObject automatically does multipart for more than 5MB worth of data -func (a api) PutObject(bucket, object, contentType string, size int64, data io.Reader) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - if err := invalidArgumentError(object); err != nil { - return err - } - // for un-authenticated requests do not initiated multipart operation - // - // NOTE: this behavior is only kept valid for S3, since S3 doesn't - // allow unauthenticated multipart requests - if a.config.Region != "milkyway" { - if a.config.AccessKeyID == "" || a.config.SecretAccessKey == "" { - _, err := a.putObjectUnAuthenticated(bucket, object, contentType, size, data) - if err != nil { - return err - } - return nil - } - } - // Special handling just for Google Cloud Storage. - // TODO - we should remove this in future when we fully implement Resumable object upload. 
- if a.config.Region == "google" { - if size > maxPartSize { - return ErrorResponse{ - Code: "EntityTooLarge", - Message: "Your proposed upload exceeds the maximum allowed object size.", - Resource: separator + bucket + separator + object, - } - } - if _, err := a.putObject(bucket, object, contentType, nil, size, ReadSeekCloser(data)); err != nil { - return err - } - return nil - } - switch { - case size < minimumPartSize && size > 0: - // Single Part use case, use PutObject directly - for part := range chopper(data, minimumPartSize, nil) { - if part.Err != nil { - return part.Err - } - // This verifies if the part.Len was an unexpected read i.e if we lost few bytes - if part.Len != size { - return ErrorResponse{ - Code: "MethodUnexpectedEOF", - Message: "Data read is less than the requested size", - Resource: separator + bucket + separator + object, - } - } - _, err := a.putObject(bucket, object, contentType, part.MD5Sum, part.Len, part.ReadSeeker) - if err != nil { - return err - } - return nil - } - default: - var inProgress bool - var inProgressUploadID string - for mpUpload := range a.listMultipartUploadsRecursive(bucket, object) { - if mpUpload.Err != nil { - return mpUpload.Err - } - if mpUpload.Metadata.Key == object { - inProgress = true - inProgressUploadID = mpUpload.Metadata.UploadID - break - } - } - if !inProgress { - return a.newObjectUpload(bucket, object, contentType, size, data) - } - return a.continueObjectUpload(bucket, object, inProgressUploadID, size, data) - } - return errors.New("Unexpected control flow, please report this error at https://github.com/minio/minio-go/issues") -} - -// StatObject verify if object exists and you have permission to access it -func (a api) StatObject(bucket, object string) (ObjectStat, error) { - if err := invalidBucketError(bucket); err != nil { - return ObjectStat{}, err - } - if err := invalidObjectError(object); err != nil { - return ObjectStat{}, err - } - return a.headObject(bucket, object) -} - -// RemoveObject remove the object from a bucket -func (a api) RemoveObject(bucket, object string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - if err := invalidObjectError(object); err != nil { - return err - } - return a.deleteObject(bucket, object) -} - -/// Bucket operations - -// MakeBucket make a new bucket -// -// optional arguments are acl and location - by default all buckets are created -// with ``private`` acl and location set to US Standard if one wishes to set -// different ACLs and Location one can set them properly. 
-// -// ACL valid values -// -// private - owner gets full access [default] -// public-read - owner gets full access, all others get read access -// public-read-write - owner gets full access, all others get full access too -// authenticated-read - owner gets full access, authenticated users get read access -// -// Location valid values which are automatically derived from config endpoint -// -// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ] -// Default - US standard -func (a api) MakeBucket(bucket string, acl BucketACL) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - if !acl.isValidBucketACL() { - return invalidArgumentError("") - } - location := a.config.Region - if location == "milkyway" { - location = "" - } - if location == "us-east-1" { - location = "" - } - return a.putBucket(bucket, string(acl), location) -} - -// SetBucketACL set the permissions on an existing bucket using access control lists (ACL) -// -// For example -// -// private - owner gets full access [default] -// public-read - owner gets full access, all others get read access -// public-read-write - owner gets full access, all others get full access too -// authenticated-read - owner gets full access, authenticated users get read access -// -func (a api) SetBucketACL(bucket string, acl BucketACL) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - if !acl.isValidBucketACL() { - return invalidArgumentError("") - } - return a.putBucketACL(bucket, string(acl)) -} - -// GetBucketACL get the permissions on an existing bucket -// -// Returned values are: -// -// private - owner gets full access -// public-read - owner gets full access, others get read access -// public-read-write - owner gets full access, others get full access too -// authenticated-read - owner gets full access, authenticated users get read access -// -func (a api) GetBucketACL(bucket string) (BucketACL, error) { - if err := invalidBucketError(bucket); err != nil { - return "", err - } - policy, err := a.getBucketACL(bucket) - if err != nil { - return "", err - } - grants := policy.AccessControlList.Grant - switch { - case len(grants) == 1: - if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { - return BucketACL("private"), nil - } - case len(grants) == 2: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return BucketACL("authenticated-read"), nil - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - return BucketACL("public-read"), nil - } - } - case len(grants) == 3: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - return BucketACL("public-read-write"), nil - } - } - } - return "", ErrorResponse{ - Code: "NoSuchBucketPolicy", - Message: "The specified bucket does not have a bucket policy.", - Resource: "/" + bucket, - RequestID: "minio", - } -} - -// BucketExists verify if bucket exists and you have permission to access it -func (a api) BucketExists(bucket string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - return a.headBucket(bucket) -} - -// RemoveBucket deletes the bucket named in the URI -// NOTE: - -// All objects (including all object versions and delete markers) -// in the bucket must be deleted before successfully attempting this request 
-func (a api) RemoveBucket(bucket string) error { - if err := invalidBucketError(bucket); err != nil { - return err - } - return a.deleteBucket(bucket) -} - -type multiPartUploadCh struct { - Metadata ObjectMultipartStat - Err error -} - -func (a api) listMultipartUploadsRecursive(bucket, object string) <-chan multiPartUploadCh { - ch := make(chan multiPartUploadCh, 1000) - go a.listMultipartUploadsRecursiveInRoutine(bucket, object, ch) - return ch -} - -func (a api) listMultipartUploadsRecursiveInRoutine(bucket, object string, ch chan multiPartUploadCh) { - defer close(ch) - listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000) - if err != nil { - ch <- multiPartUploadCh{ - Metadata: ObjectMultipartStat{}, - Err: err, - } - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - ch <- multiPartUploadCh{ - Metadata: multiPartUpload, - Err: nil, - } - } - for { - if !listMultipartUplResult.IsTruncated { - break - } - listMultipartUplResult, err = a.listMultipartUploads(bucket, - listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000) - if err != nil { - ch <- multiPartUploadCh{ - Metadata: ObjectMultipartStat{}, - Err: err, - } - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - ch <- multiPartUploadCh{ - Metadata: multiPartUpload, - Err: nil, - } - } - } -} - -// listIncompleteUploadsInRoutine is an internal goroutine function called for listing objects -// This function feeds data into channel -func (a api) listIncompleteUploadsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectMultipartStatCh) { - defer close(ch) - if err := invalidBucketError(bucket); err != nil { - ch <- ObjectMultipartStatCh{ - Stat: ObjectMultipartStat{}, - Err: err, - } - return - } - switch { - case recursive == true: - var multipartMarker string - var uploadIDMarker string - for { - result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "", 1000) - if err != nil { - ch <- ObjectMultipartStatCh{ - Stat: ObjectMultipartStat{}, - Err: err, - } - return - } - for _, objectSt := range result.Uploads { - objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID) - if err != nil { - ch <- ObjectMultipartStatCh{ - Stat: ObjectMultipartStat{}, - Err: err, - } - } - ch <- ObjectMultipartStatCh{ - Stat: objectSt, - Err: nil, - } - multipartMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - } - if !result.IsTruncated { - break - } - } - default: - var multipartMarker string - var uploadIDMarker string - for { - result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "/", 1000) - if err != nil { - ch <- ObjectMultipartStatCh{ - Stat: ObjectMultipartStat{}, - Err: err, - } - return - } - multipartMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - for _, objectSt := range result.Uploads { - objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID) - if err != nil { - ch <- ObjectMultipartStatCh{ - Stat: ObjectMultipartStat{}, - Err: err, - } - } - ch <- ObjectMultipartStatCh{ - Stat: objectSt, - Err: nil, - } - } - for _, prefix := range result.CommonPrefixes { - object := ObjectMultipartStat{} - object.Key = prefix.Prefix - object.Size = 0 - ch <- ObjectMultipartStatCh{ - Stat: object, - Err: nil, - } - } - if !result.IsTruncated { - break - } - } - } -} - -// ListIncompleteUploads - (List incompletely uploaded multipart objects) 
- List some multipart objects or all recursively -// -// ListIncompleteUploads is a channel based API implemented to facilitate ease of usage of S3 API ListMultipartUploads() -// by automatically recursively traversing all multipart objects on a given bucket if specified. -// -// Your input paramters are just bucket, prefix and recursive -// -// If you enable recursive as 'true' this function will return back all the multipart objects in a given bucket -// -// eg:- -// api := client.New(....) -// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", true) { -// fmt.Println(message.Stat) -// } -// -func (a api) ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh { - ch := make(chan ObjectMultipartStatCh, 1000) - go a.listIncompleteUploadsInRoutine(bucket, prefix, recursive, ch) - return ch -} - -// listObjectsInRoutine is an internal goroutine function called for listing objects -// This function feeds data into channel -func (a api) listObjectsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectStatCh) { - defer close(ch) - if err := invalidBucketError(bucket); err != nil { - ch <- ObjectStatCh{ - Stat: ObjectStat{}, - Err: err, - } - return - } - switch { - case recursive == true: - var marker string - for { - result, err := a.listObjects(bucket, marker, prefix, "", 1000) - if err != nil { - ch <- ObjectStatCh{ - Stat: ObjectStat{}, - Err: err, - } - return - } - for _, object := range result.Contents { - ch <- ObjectStatCh{ - Stat: object, - Err: nil, - } - marker = object.Key - } - if !result.IsTruncated { - break - } - } - default: - var marker string - for { - result, err := a.listObjects(bucket, marker, prefix, "/", 1000) - if err != nil { - ch <- ObjectStatCh{ - Stat: ObjectStat{}, - Err: err, - } - return - } - marker = result.NextMarker - for _, object := range result.Contents { - ch <- ObjectStatCh{ - Stat: object, - Err: nil, - } - } - for _, prefix := range result.CommonPrefixes { - object := ObjectStat{} - object.Key = prefix.Prefix - object.Size = 0 - ch <- ObjectStatCh{ - Stat: object, - Err: nil, - } - } - if !result.IsTruncated { - break - } - } - } -} - -// ListObjects - (List Objects) - List some objects or all recursively -// -// ListObjects is a channel based API implemented to facilitate ease of usage of S3 API ListObjects() -// by automatically recursively traversing all objects on a given bucket if specified. -// -// Your input paramters are just bucket, prefix and recursive -// -// If you enable recursive as 'true' this function will return back all the objects in a given bucket -// -// eg:- -// api := client.New(....) 
-// for message := range api.ListObjects("mytestbucket", "starthere", true) { -// fmt.Println(message.Stat) -// } -// -func (a api) ListObjects(bucket string, prefix string, recursive bool) <-chan ObjectStatCh { - ch := make(chan ObjectStatCh, 1000) - go a.listObjectsInRoutine(bucket, prefix, recursive, ch) - return ch -} - -// listBucketsInRoutine is an internal go routine function called for listing buckets -// This function feeds data into channel -func (a api) listBucketsInRoutine(ch chan BucketStatCh) { - defer close(ch) - listAllMyBucketListResults, err := a.listBuckets() - if err != nil { - ch <- BucketStatCh{ - Stat: BucketStat{}, - Err: err, - } - return - } - for _, bucket := range listAllMyBucketListResults.Buckets.Bucket { - ch <- BucketStatCh{ - Stat: bucket, - Err: nil, - } - } -} - -// ListBuckets list of all buckets owned by the authenticated sender of the request -// -// NOTE: -// This call requires explicit authentication, no anonymous -// requests are allowed for listing buckets -// -// eg:- -// api := client.New(....) -// for message := range api.ListBuckets() { -// fmt.Println(message.Stat) -// } -// -func (a api) ListBuckets() <-chan BucketStatCh { - ch := make(chan BucketStatCh, 100) - go a.listBucketsInRoutine(ch) - return ch -} - -func (a api) removeIncompleteUploadInRoutine(bucket, object string, errorCh chan error) { - defer close(errorCh) - if err := invalidBucketError(bucket); err != nil { - errorCh <- err - return - } - if err := invalidObjectError(object); err != nil { - errorCh <- err - return - } - listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000) - if err != nil { - errorCh <- err - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - if object == multiPartUpload.Key { - err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID) - if err != nil { - errorCh <- err - return - } - return - } - } - for { - if !listMultipartUplResult.IsTruncated { - break - } - listMultipartUplResult, err = a.listMultipartUploads(bucket, - listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000) - if err != nil { - errorCh <- err - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - if object == multiPartUpload.Key { - err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID) - if err != nil { - errorCh <- err - return - } - return - } - } - - } -} - -// RemoveIncompleteUpload - abort a specific in progress active multipart upload -// requires explicit authentication, no anonymous requests are allowed for multipart API -func (a api) RemoveIncompleteUpload(bucket, object string) <-chan error { - errorCh := make(chan error) - go a.removeIncompleteUploadInRoutine(bucket, object, errorCh) - return errorCh +// CloudStorageClient - Cloud Storage Client interface. +type CloudStorageClient interface { + // Bucket Read/Write/Stat operations. + MakeBucket(bucketName string, cannedACL BucketACL, location string) error + BucketExists(bucketName string) error + RemoveBucket(bucketName string) error + SetBucketACL(bucketName string, cannedACL BucketACL) error + GetBucketACL(bucketName string) (BucketACL, error) + + ListBuckets() ([]BucketStat, error) + ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat + ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat + + // Object Read/Write/Stat operations. 
+ GetObject(bucketName, objectName string) (reader io.ReadCloser, stat ObjectStat, err error)
+ PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error)
+ StatObject(bucketName, objectName string) (ObjectStat, error)
+ RemoveObject(bucketName, objectName string) error
+ RemoveIncompleteUpload(bucketName, objectName string) error
+
+ // Object Read/Write for sparse upload.
+ GetObjectPartial(bucketName, objectName string) (reader ReadAtCloser, stat ObjectStat, err error)
+ PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error)
+
+ // File to Object API.
+ FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
+ FGetObject(bucketName, objectName, filePath string) error
+
+ // Presigned operations.
+ PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
+ PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
+ PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
+
+ // Application info.
+ SetAppInfo(appName, appVersion string)
+
+ // Set custom transport.
+ SetCustomTransport(customTransport http.RoundTripper)
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go
new file mode 100644
index 000000000..9e1d60cf9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go
@@ -0,0 +1,158 @@
+package minio_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+func randString(n int, src rand.Source) string {
+ b := make([]byte, n)
+ // A src.Int63() generates 63 random bits, enough for letterIdxMax characters.
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return string(b[0:30])
+}
+
+func TestFunctional(t *testing.T) {
+ c, err := minio.New(
+ "play.minio.io:9002",
+ "Q3AM3UQ867SPQQA43P2F",
+ "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ false,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
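+ // SetAppInfo registers a custom application name/version pair that the
+ // client appends to its User-Agent header, e.g. a suffix of the form
+ // "Test/0.1.0" (the exact header layout is defined by the library, not
+ // shown in this patch).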
+ c.SetAppInfo("Test", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + fileName := randString(60, rand.NewSource(time.Now().UnixNano())) + file, err := os.Create(fileName) + if err != nil { + t.Fatal("Error:", err) + } + for i := 0; i < 10; i++ { + file.WriteString(fileName) + } + file.Close() + + err = c.BucketExists(bucketName) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + err = c.SetBucketACL(bucketName, "public-read-write") + if err != nil { + t.Fatal("Error:", err) + } + + acl, err := c.GetBucketACL(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + if acl != minio.BucketACL("public-read-write") { + t.Fatal("Error:", acl) + } + + _, err = c.ListBuckets() + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "Minio" + reader := bytes.NewReader([]byte("Hello World!")) + + n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "") + if err != nil { + t.Fatal("Error: ", err) + } + if n != int64(len([]byte("Hello World!"))) { + t.Fatal("Error: bad length ", n, reader.Len()) + } + + newReader, _, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain") + if err != nil { + t.Fatal("Error: ", err) + } + if n != int64(10*len(fileName)) { + t.Fatal("Error: bad length ", n, int64(10*len(fileName))) + } + + err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, []byte("Hello World!")) { + t.Fatal("Error: bytes invalid.") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = c.RemoveBucket("bucket1") + if err == nil { + t.Fatal("Error:") + } + + if err.Error() != "The specified bucket does not exist." { + t.Fatal("Error: ", err) + } + + if err = os.Remove(fileName); err != nil { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName + "-f"); err != nil { + t.Fatal("Error: ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go deleted file mode 100644 index 146f4d6e1..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio_test - -// bucketHandler is an http.Handler that verifies bucket responses and validates incoming requests -import ( - "bytes" - "io" - "net/http" - "strconv" - "time" -) - -type bucketHandler struct { - resource string -} - -func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - switch { - case r.Method == "GET": - switch { - case r.URL.Path == "/": - response := []byte("bucket2015-05-20T23:05:09.230Zminiominio") - w.Header().Set("Content-Length", strconv.Itoa(len(response))) - w.Write(response) - case r.URL.Path == "/bucket": - _, ok := r.URL.Query()["acl"] - if ok { - response := []byte("75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.com75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.comFULL_CONTROL") - w.Header().Set("Content-Length", strconv.Itoa(len(response))) - w.Write(response) - return - } - fallthrough - case r.URL.Path == "/bucket": - response := []byte("\"259d04a13802ae09c7e41be50ccc6baa\"object2015-05-21T18:24:21.097Z22061miniominioSTANDARDfalse1000testbucket") - w.Header().Set("Content-Length", strconv.Itoa(len(response))) - w.Write(response) - } - case r.Method == "PUT": - switch { - case r.URL.Path == h.resource: - _, ok := r.URL.Query()["acl"] - if ok { - switch r.Header.Get("x-amz-acl") { - case "public-read-write": - fallthrough - case "public-read": - fallthrough - case "private": - fallthrough - case "authenticated-read": - w.WriteHeader(http.StatusOK) - return - default: - w.WriteHeader(http.StatusNotImplemented) - return - } - } - w.WriteHeader(http.StatusOK) - default: - w.WriteHeader(http.StatusBadRequest) - } - case r.Method == "HEAD": - switch { - case r.URL.Path == h.resource: - w.WriteHeader(http.StatusOK) - default: - w.WriteHeader(http.StatusForbidden) - } - case r.Method == "DELETE": - switch { - case r.URL.Path != h.resource: - w.WriteHeader(http.StatusNotFound) - default: - h.resource = "" - w.WriteHeader(http.StatusNoContent) - } - } -} - -// objectHandler is an http.Handler that verifies object responses and validates incoming requests -type objectHandler struct { - resource string - data []byte -} - -func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - switch { - case r.Method == "PUT": - length, err := strconv.Atoi(r.Header.Get("Content-Length")) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - var buffer bytes.Buffer - _, err = io.CopyN(&buffer, r.Body, int64(length)) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - if !bytes.Equal(h.data, buffer.Bytes()) { - w.WriteHeader(http.StatusInternalServerError) - return - } - w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") - w.WriteHeader(http.StatusOK) - case r.Method == "HEAD": - if r.URL.Path != h.resource { - w.WriteHeader(http.StatusNotFound) - return - } - w.Header().Set("Content-Length", strconv.Itoa(len(h.data))) - w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) - w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") - w.WriteHeader(http.StatusOK) - case r.Method == "POST": - _, ok := r.URL.Query()["uploads"] - if ok { - response := []byte("example-bucketobjectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA") - w.Header().Set("Content-Length", strconv.Itoa(len(response))) - w.Write(response) - return - } - case r.Method == "GET": - _, ok := r.URL.Query()["uploadId"] - if ok { - uploadID := r.URL.Query().Get("uploadId") - if uploadID != 
"XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA" { - w.WriteHeader(http.StatusNotFound) - return - } - response := []byte("example-bucketexample-objectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZAarn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xxumat-user-11116a31-17b5-4fb7-9df5-b288870f11xx75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06asomeNameSTANDARD132true22010-11-10T20:48:34.000Z\"7778aef83f66abc1fa1e8477f296d394\"1048576032010-11-10T20:48:33.000Z\"aaaa18db4cc2f85cedef654fccc4a4x8\"10485760") - w.Header().Set("Content-Length", strconv.Itoa(len(response))) - w.Write(response) - return - } - if r.URL.Path != h.resource { - w.WriteHeader(http.StatusNotFound) - return - } - w.Header().Set("Content-Length", strconv.Itoa(len(h.data))) - w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) - w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"") - w.WriteHeader(http.StatusOK) - io.Copy(w, bytes.NewReader(h.data)) - case r.Method == "DELETE": - if r.URL.Path != h.resource { - w.WriteHeader(http.StatusNotFound) - return - } - h.resource = "" - h.data = nil - w.WriteHeader(http.StatusNoContent) - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go index 23d1832a2..2bda99f47 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go @@ -17,25 +17,25 @@ package minio import ( - "strings" + "net/url" "testing" ) func TestSignature(t *testing.T) { - conf := new(Config) - if !conf.Signature.isLatest() { - t.Fatalf("Error") + clnt := Client{} + if !clnt.signature.isV4() { + t.Fatal("Error") } - conf.Signature = SignatureV2 - if !conf.Signature.isV2() { - t.Fatalf("Error") + clnt.signature = SignatureV2 + if !clnt.signature.isV2() { + t.Fatal("Error") } - if conf.Signature.isV4() { - t.Fatalf("Error") + if clnt.signature.isV4() { + t.Fatal("Error") } - conf.Signature = SignatureV4 - if !conf.Signature.isV4() { - t.Fatalf("Error") + clnt.signature = SignatureV4 + if !clnt.signature.isV4() { + t.Fatal("Error") } } @@ -54,36 +54,17 @@ func TestACLTypes(t *testing.T) { } } -func TestUserAgent(t *testing.T) { - conf := new(Config) - conf.SetUserAgent("minio", "1.0", "amd64") - if !strings.Contains(conf.userAgent, "minio") { - t.Fatalf("Error") - } -} - -func TestGetRegion(t *testing.T) { - region := getRegion("s3.amazonaws.com") - if region != "us-east-1" { - t.Fatalf("Error") - } - region = getRegion("localhost:9000") - if region != "milkyway" { - t.Fatalf("Error") - } -} - func TestPartSize(t *testing.T) { var maxPartSize int64 = 1024 * 1024 * 1024 * 5 - partSize := calculatePartSize(5000000000000000000) + partSize := optimalPartSize(5000000000000000000) if partSize > minimumPartSize { if partSize > maxPartSize { - t.Fatal("invalid result, cannot be bigger than maxPartSize 5GB") + t.Fatal("invalid result, cannot be bigger than maxPartSize 5GiB") } } - partSize = calculatePartSize(50000000000) + partSize = optimalPartSize(50000000000) if partSize > minimumPartSize { - t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MB") + t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MiB") } } @@ -121,8 +102,148 @@ func TestURLEncoding(t *testing.T) { } for _, u := range want { - if u.encodedName != getURLEncodedPath(u.name) { - t.Errorf("Error") + if u.encodedName != urlEncodePath(u.name) { + 
t.Fatal("Error") + } + } +} + +func TestGetEndpointURL(t *testing.T) { + if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil { + t.Fatal("Error:", err) + } + if _, err := getEndpointURL("192.168.1.1", false); err != nil { + t.Fatal("Error:", err) + } + if _, err := getEndpointURL("13333.123123.-", false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.aamzza.-", false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil { + t.Fatal("Error") + } +} + +func TestValidIP(t *testing.T) { + type validIP struct { + ip string + valid bool + } + + want := []validIP{ + { + ip: "192.168.1.1", + valid: true, + }, + { + ip: "192.1.8", + valid: false, + }, + { + ip: "..192.", + valid: false, + }, + { + ip: "192.168.1.1.1", + valid: false, + }, + } + for _, w := range want { + valid := isValidIP(w.ip) + if valid != w.valid { + t.Fatal("Error") + } + } +} + +func TestValidEndpointDomain(t *testing.T) { + type validEndpoint struct { + endpointDomain string + valid bool + } + + want := []validEndpoint{ + { + endpointDomain: "s3.amazonaws.com", + valid: true, + }, + { + endpointDomain: "s3.amazonaws.com_", + valid: false, + }, + { + endpointDomain: "%$$$", + valid: false, + }, + { + endpointDomain: "s3.amz.test.com", + valid: true, + }, + { + endpointDomain: "s3.%%", + valid: false, + }, + { + endpointDomain: "localhost", + valid: true, + }, + { + endpointDomain: "-localhost", + valid: false, + }, + { + endpointDomain: "", + valid: false, + }, + { + endpointDomain: "\n \t", + valid: false, + }, + { + endpointDomain: " ", + valid: false, + }, + } + for _, w := range want { + valid := isValidDomain(w.endpointDomain) + if valid != w.valid { + t.Fatal("Error:", w.endpointDomain) + } + } +} + +func TestValidEndpointURL(t *testing.T) { + type validURL struct { + url string + valid bool + } + want := []validURL{ + { + url: "https://s3.amazonaws.com", + valid: true, + }, + { + url: "https://s3.amazonaws.com/bucket/object", + valid: false, + }, + { + url: "192.168.1.1", + valid: false, + }, + } + for _, w := range want { + u, err := url.Parse(w.url) + if err != nil { + t.Fatal("Error:", err) + } + valid := false + if err := isValidEndpointURL(u); err == nil { + valid = true + } + if valid != w.valid { + t.Fatal("Error") } } } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go deleted file mode 100644 index 674f5d770..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio_test - -import ( - "bytes" - "io" - "net/http/httptest" - "testing" - "time" - - "github.com/minio/minio-go" -) - -func TestBucketOperations(t *testing.T) { - bucket := bucketHandler(bucketHandler{ - resource: "/bucket", - }) - server := httptest.NewServer(bucket) - defer server.Close() - - a, err := minio.New(minio.Config{Endpoint: server.URL}) - if err != nil { - t.Fatal("Error") - } - err = a.MakeBucket("bucket", "private") - if err != nil { - t.Fatal("Error") - } - - err = a.BucketExists("bucket") - if err != nil { - t.Fatal("Error") - } - - err = a.BucketExists("bucket1") - if err == nil { - t.Fatal("Error") - } - if err.Error() != "Access Denied." { - t.Fatal("Error") - } - - err = a.SetBucketACL("bucket", "public-read-write") - if err != nil { - t.Fatal("Error") - } - - acl, err := a.GetBucketACL("bucket") - if err != nil { - t.Fatal("Error") - } - if acl != minio.BucketACL("private") { - t.Fatal("Error") - } - - for b := range a.ListBuckets() { - if b.Err != nil { - t.Fatal(b.Err.Error()) - } - if b.Stat.Name != "bucket" { - t.Fatal("Error") - } - } - - for o := range a.ListObjects("bucket", "", true) { - if o.Err != nil { - t.Fatal(o.Err.Error()) - } - if o.Stat.Key != "object" { - t.Fatal("Error") - } - } - - err = a.RemoveBucket("bucket") - if err != nil { - t.Fatal("Error") - } - - err = a.RemoveBucket("bucket1") - if err == nil { - t.Fatal("Error") - } - if err.Error() != "The specified bucket does not exist." { - t.Fatal("Error") - } -} - -func TestBucketOperationsFail(t *testing.T) { - bucket := bucketHandler(bucketHandler{ - resource: "/bucket", - }) - server := httptest.NewServer(bucket) - defer server.Close() - - a, err := minio.New(minio.Config{Endpoint: server.URL}) - if err != nil { - t.Fatal("Error") - } - err = a.MakeBucket("bucket$$$", "private") - if err == nil { - t.Fatal("Error") - } - - err = a.BucketExists("bucket.") - if err == nil { - t.Fatal("Error") - } - - err = a.SetBucketACL("bucket-.", "public-read-write") - if err == nil { - t.Fatal("Error") - } - - _, err = a.GetBucketACL("bucket??") - if err == nil { - t.Fatal("Error") - } - - for o := range a.ListObjects("bucket??", "", true) { - if o.Err == nil { - t.Fatal(o.Err.Error()) - } - } - - err = a.RemoveBucket("bucket??") - if err == nil { - t.Fatal("Error") - } - - if err.Error() != "The specified bucket is not valid." 
{ - t.Fatal("Error") - } -} - -func TestObjectOperations(t *testing.T) { - object := objectHandler(objectHandler{ - resource: "/bucket/object", - data: []byte("Hello, World"), - }) - server := httptest.NewServer(object) - defer server.Close() - - a, err := minio.New(minio.Config{Endpoint: server.URL}) - if err != nil { - t.Fatal("Error") - } - data := []byte("Hello, World") - err = a.PutObject("bucket", "object", "", int64(len(data)), bytes.NewReader(data)) - if err != nil { - t.Fatal("Error") - } - metadata, err := a.StatObject("bucket", "object") - if err != nil { - t.Fatal("Error") - } - if metadata.Key != "object" { - t.Fatal("Error") - } - if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" { - t.Fatal("Error") - } - - reader, metadata, err := a.GetObject("bucket", "object") - if err != nil { - t.Fatal("Error") - } - if metadata.Key != "object" { - t.Fatal("Error") - } - if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" { - t.Fatal("Error") - } - - var buffer bytes.Buffer - _, err = io.Copy(&buffer, reader) - if !bytes.Equal(buffer.Bytes(), data) { - t.Fatal("Error") - } - - err = a.RemoveObject("bucket", "object") - if err != nil { - t.Fatal("Error") - } - err = a.RemoveObject("bucket", "object1") - if err == nil { - t.Fatal("Error") - } - if err.Error() != "The specified key does not exist." { - t.Fatal("Error") - } -} - -func TestPresignedURL(t *testing.T) { - object := objectHandler(objectHandler{ - resource: "/bucket/object", - data: []byte("Hello, World"), - }) - server := httptest.NewServer(object) - defer server.Close() - - a, err := minio.New(minio.Config{Endpoint: server.URL}) - if err != nil { - t.Fatal("Error") - } - // should error out for invalid access keys - _, err = a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second) - if err == nil { - t.Fatal("Error") - } - - a, err = minio.New(minio.Config{ - Endpoint: server.URL, - AccessKeyID: "accessKey", - SecretAccessKey: "secretKey", - }) - if err != nil { - t.Fatal("Error") - } - url, err := a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second) - if err != nil { - t.Fatal("Error") - } - if url == "" { - t.Fatal("Error") - } - _, err = a.PresignedGetObject("bucket", "object", time.Duration(0)*time.Second) - if err == nil { - t.Fatal("Error") - } - _, err = a.PresignedGetObject("bucket", "object", time.Duration(604801)*time.Second) - if err == nil { - t.Fatal("Error") - } -} - -func TestErrorResponse(t *testing.T) { - errorResponse := []byte("AccessDeniedAccess Denied/mybucket/myphoto.jpgF19772218238A85AGuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD") - errorReader := bytes.NewReader(errorResponse) - err := minio.BodyToErrorResponse(errorReader, "application/xml") - if err == nil { - t.Fatal("Error") - } - if err.Error() != "Access Denied" { - t.Fatal("Error") - } - resp := minio.ToErrorResponse(err) - // valid all fields - if resp == nil { - t.Fatal("Error") - } - if resp.Code != "AccessDenied" { - t.Fatal("Error") - } - if resp.RequestID != "F19772218238A85A" { - t.Fatal("Error") - } - if resp.Message != "Access Denied" { - t.Fatal("Error") - } - if resp.Resource != "/mybucket/myphoto.jpg" { - t.Fatal("Error") - } - if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" { - t.Fatal("Error") - } - if resp.ToXML() == "" { - t.Fatal("Error") - } - if resp.ToJSON() == "" { - t.Fatal("Error") - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml 
index 1d140afd9..7f624a459 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml
@@ -14,9 +14,6 @@ environment:
 # scripts that run after cloning repository
 install:
 - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - rd C:\Go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.5.1.windows-amd64.zip
- - 7z x go1.5.1.windows-amd64.zip -oC:\ >nul
 - go version
 - go env
 - go get -u github.com/golang/lint/golint
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
index 5718dbbd3..89c386ca1 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
@@ -16,10 +16,10 @@
 package minio
 
-// BucketACL - bucket level access control
+// BucketACL - bucket level access control.
 type BucketACL string
 
-// different types of ACL's currently supported for buckets
+// Different types of ACLs currently supported for buckets.
 const (
 bucketPrivate = BucketACL("private")
 bucketReadOnly = BucketACL("public-read")
@@ -27,7 +27,7 @@ const (
 bucketAuthenticated = BucketACL("authenticated-read")
 )
 
-// String printer helper
+// Stringify acl.
 func (b BucketACL) String() string {
 if string(b) == "" {
 return "private"
@@ -35,7 +35,7 @@ func (b BucketACL) String() string {
 return string(b)
 }
 
-// isValidBucketACL - is provided acl string supported
+// isValidBucketACL - is provided acl string supported.
 func (b BucketACL) isValidBucketACL() bool {
 switch true {
 case b.isPrivate():
@@ -54,22 +54,22 @@ func (b BucketACL) isValidBucketACL() bool {
 }
 }
 
-// IsPrivate - is acl Private
+// isPrivate - is acl Private.
 func (b BucketACL) isPrivate() bool {
 return b == bucketPrivate
 }
 
-// IsPublicRead - is acl PublicRead
+// isPublicRead - is acl PublicRead.
 func (b BucketACL) isReadOnly() bool {
 return b == bucketReadOnly
 }
 
-// IsPublicReadWrite - is acl PublicReadWrite
+// isPublicReadWrite - is acl PublicReadWrite.
 func (b BucketACL) isPublic() bool {
 return b == bucketPublic
 }
 
-// IsAuthenticated - is acl AuthenticatedRead
+// isAuthenticated - is acl AuthenticatedRead.
 func (b BucketACL) isAuthenticated() bool {
 return b == bucketAuthenticated
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go
new file mode 100644
index 000000000..29fb6aa36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go
@@ -0,0 +1,153 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/hex"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "sync"
+)
+
+// bucketLocationCache provides a simple mechanism to hold bucket locations in memory.
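+//
+// A minimal usage sketch, using only the methods defined in this file:
+//
+//	cache := newBucketLocationCache()
+//	cache.Set("my-bucketname", "eu-west-1")
+//	if location, ok := cache.Get("my-bucketname"); ok {
+//		_ = location // "eu-west-1"
+//	}
+//	cache.Delete("my-bucketname")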
+type bucketLocationCache struct { + // Mutex is used for handling the concurrent + // read/write requests for cache + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache provides a new bucket location cache to be used +// internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get returns a value of a given key if it exists +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set will persist a value to the cache +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete deletes a bucket name. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// getBucketLocation - get location for the bucketName from location map cache. +func (c Client) getBucketLocation(bucketName string) (string, error) { + // For anonymous requests, default to "us-east-1" and let other calls + // move forward. + if c.anonymous { + return "us-east-1", nil + } + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.httpClient.Do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", HTTPRespToErrorResponse(resp, bucketName, "") + } + } + + // Extract location. + var locationConstraint string + err = xmlDecoder(resp.Body, &locationConstraint) + if err != nil { + return "", err + } + + location := locationConstraint + // location is empty will be 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // location can be 'EU' convert it to meaningful 'eu-west-1'. + if location == "EU" { + location = "eu-west-1" + } + + // Save the location into cache. + c.bucketLocCache.Set(bucketName, location) + + // Return. + return location, nil +} + +// getBucketLocationRequest wrapper creates a new getBucketLocation request. +func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("location", "") + + // Set get bucket location always as path style. + targetURL := c.endpointURL + targetURL.Path = filepath.Join(bucketName, "") + targetURL.RawQuery = urlValues.Encode() + + // get a new HTTP request for the method. + req, err := http.NewRequest("GET", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // set UserAgent for the request. + c.setUserAgent(req) + + // set sha256 sum for signature calculation only with signature version '4'. + if c.signature.isV4() { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + } + + // Sign the request. 
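+ // The bucket's region is the very thing this request discovers, so the
+ // V4 path below always signs against the default "us-east-1" region;
+ // the V2 scheme embeds no region at all.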
+ if c.signature.isV4() { + req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + } else if c.signature.isV2() { + req = SignV2(*req, c.accessKeyID, c.secretAccessKey) + } + return req, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go b/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go deleted file mode 100644 index 6b2ff9a19..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "crypto/md5" - "io" -) - -// part - message structure for results from the MultiPart -type part struct { - MD5Sum []byte - ReadSeeker io.ReadSeeker - Err error - Len int64 - Num int // part number -} - -// skipPart - skipping uploaded parts -type skipPart struct { - md5sum []byte - partNumber int -} - -// chopper reads from io.Reader, partitions the data into chunks of given chunksize, and sends -// each chunk as io.ReadSeeker to the caller over a channel -// -// This method runs until an EOF or error occurs. If an error occurs, -// the method sends the error over the channel and returns. -// Before returning, the channel is always closed. 
-// -// additionally this function also skips list of parts if provided -func chopper(reader io.Reader, chunkSize int64, skipParts []skipPart) <-chan part { - ch := make(chan part, 3) - go chopperInRoutine(reader, chunkSize, skipParts, ch) - return ch -} - -func chopperInRoutine(reader io.Reader, chunkSize int64, skipParts []skipPart, ch chan part) { - defer close(ch) - p := make([]byte, chunkSize) - n, err := io.ReadFull(reader, p) - if err == io.EOF || err == io.ErrUnexpectedEOF { // short read, only single part return - m := md5.Sum(p[0:n]) - ch <- part{ - MD5Sum: m[:], - ReadSeeker: bytes.NewReader(p[0:n]), - Err: nil, - Len: int64(n), - Num: 1, - } - return - } - // catastrophic error send error and return - if err != nil { - ch <- part{ - ReadSeeker: nil, - Err: err, - Num: 0, - } - return - } - // send the first part - var num = 1 - md5SumBytes := md5.Sum(p) - sp := skipPart{ - partNumber: num, - md5sum: md5SumBytes[:], - } - if !isPartNumberUploaded(sp, skipParts) { - ch <- part{ - MD5Sum: md5SumBytes[:], - ReadSeeker: bytes.NewReader(p), - Err: nil, - Len: int64(n), - Num: num, - } - } - for err == nil { - var n int - p := make([]byte, chunkSize) - n, err = io.ReadFull(reader, p) - if err != nil { - if err != io.EOF && err != io.ErrUnexpectedEOF { // catastrophic error - ch <- part{ - ReadSeeker: nil, - Err: err, - Num: 0, - } - return - } - } - num++ - md5SumBytes := md5.Sum(p[0:n]) - sp := skipPart{ - partNumber: num, - md5sum: md5SumBytes[:], - } - if isPartNumberUploaded(sp, skipParts) { - continue - } - ch <- part{ - MD5Sum: md5SumBytes[:], - ReadSeeker: bytes.NewReader(p[0:n]), - Err: nil, - Len: int64(n), - Num: num, - } - - } -} - -// to verify if partNumber is part of the skip part list -func isPartNumberUploaded(part skipPart, skipParts []skipPart) bool { - for _, p := range skipParts { - if p.partNumber == part.partNumber && bytes.Equal(p.md5sum, part.md5sum) { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go b/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go new file mode 100644 index 000000000..636e06f6f --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go @@ -0,0 +1,52 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha256" + "encoding/xml" + "io" +) + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumMD5 calculate md5 sum for an input byte array. +func sumMD5(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. 
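+//
+// For example, AWS Signature V4 key derivation is built by chaining
+// sumHMAC calls, as in the getSigningKey helper this patch removes
+// below; the date and region values here are only illustrative:
+//
+//	date := sumHMAC([]byte("AWS4"+secretAccessKey), []byte("20151106"))
+//	region := sumHMAC(date, []byte("us-east-1"))
+//	service := sumHMAC(region, []byte("s3"))
+//	signingKey := sumHMAC(service, []byte("aws4_request"))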
+func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common.go b/Godeps/_workspace/src/github.com/minio/minio-go/common.go deleted file mode 100644 index 8ac854681..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/common.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "encoding/xml" - "io" - "strings" - "time" -) - -// decoder provides a unified decoding method interface -type decoder interface { - Decode(v interface{}) error -} - -// acceptTypeDecoder provide decoded value in given acceptType -func acceptTypeDecoder(body io.Reader, acceptType string, v interface{}) error { - var d decoder - switch { - case acceptType == "application/xml": - d = xml.NewDecoder(body) - case acceptType == "application/json": - d = json.NewDecoder(body) - default: - d = xml.NewDecoder(body) - } - return d.Decode(v) -} - -// sum256Reader calculate sha256 sum for an input read seeker -func sum256Reader(reader io.ReadSeeker) ([]byte, error) { - h := sha256.New() - var err error - - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - h.Write(byteBuffer) - } - - if err != io.EOF { - return nil, err - } - - return h.Sum(nil), nil -} - -// sum256 calculate sha256 sum for an input byte array -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumHMAC calculate hmac between two input byte array -func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -// getSigningKey hmac seed to calculate final signature -func getSigningKey(secret, region string, t time.Time) []byte { - date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) - regionbytes := sumHMAC(date, []byte(region)) - service := sumHMAC(regionbytes, []byte("s3")) - signingKey := sumHMAC(service, []byte("aws4_request")) - return signingKey -} - -// getSignature final signature in hexadecimal form -func getSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - -// getScope generate a string of a specific date, an AWS region, and a service -func getScope(region string, t time.Time) string { - scope := strings.Join([]string{ - t.Format(yyyymmdd), - region, - "s3", - "aws4_request", - }, "/") - return scope -} - -// getCredential generate a credential string -func getCredential(accessKeyID, region string, t time.Time) string { - scope := getScope(region, t) - return accessKeyID + "/" + scope -} diff --git 
a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go
new file mode 100644
index 000000000..617621298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go
@@ -0,0 +1,38 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+/// Multipart upload defaults.
+
+// minimumPartSize - minimum part size 5MiB per object after which
+// putObject behaves internally as multipart.
+var minimumPartSize int64 = 1024 * 1024 * 5
+
+// maxParts - maximum parts for a single multipart session.
+var maxParts = int64(10000)
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload operation.
+var maxPartSize int64 = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT operation.
+var maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for Multipart operation.
+var maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// optimalReadAtBufferSize - optimal buffer 5MiB used for reading through ReadAt operation.
+var optimalReadAtBufferSize = 1024 * 1024 * 5
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
deleted file mode 100644
index b85e36e51..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "encoding/xml"
- "io"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-/* **** SAMPLE ERROR RESPONSE ****
-<Error>
- <Code>AccessDenied</Code>
- <Message>Access Denied</Message>
- <Resource>/mybucket/myphoto.jpg</Resource>
- <RequestId>F19772218238A85A</RequestId>
- <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
-</Error>
-*/
-
-// ErrorResponse is the type error returned by some API operations.
-type ErrorResponse struct {
- XMLName xml.Name `xml:"Error" json:"-"`
- Code string
- Message string
- Resource string
- RequestID string `xml:"RequestId"`
- HostID string `xml:"HostId"`
-}
-
-// ToErrorResponse returns parsed ErrorResponse struct, if input is nil or not ErrorResponse return value is nil
-// this fuction is useful when some one wants to dig deeper into the error structures over the network.
-// -// for example: -// -// import s3 "github.com/minio/minio-go" -// ... -// ... -// ..., err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// fmt.Println(resp.ToXML()) -// } -// ... -// ... -func ToErrorResponse(err error) *ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return &err - default: - return nil - } -} - -// ToXML send raw xml marshalled as string -func (e ErrorResponse) ToXML() string { - b, err := xml.Marshal(&e) - if err != nil { - panic(err) - } - return string(b) -} - -// ToJSON send raw json marshalled as string -func (e ErrorResponse) ToJSON() string { - b, err := json.Marshal(&e) - if err != nil { - panic(err) - } - return string(b) -} - -// Error formats HTTP error string -func (e ErrorResponse) Error() string { - return e.Message -} - -// BodyToErrorResponse returns a new encoded ErrorResponse structure -func BodyToErrorResponse(errBody io.Reader, acceptType string) error { - var errorResponse ErrorResponse - err := acceptTypeDecoder(errBody, acceptType, &errorResponse) - if err != nil { - return err - } - return errorResponse -} - -// invalidBucketToError - invalid bucket to errorResponse -func invalidBucketError(bucket string) error { - // verify bucket name in accordance with - // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html - isValidBucket := func(bucket string) bool { - if len(bucket) < 3 || len(bucket) > 63 { - return false - } - if bucket[0] == '.' || bucket[len(bucket)-1] == '.' { - return false - } - if match, _ := regexp.MatchString("\\.\\.", bucket); match == true { - return false - } - // We don't support buckets with '.' in them - match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket) - return match - } - - if !isValidBucket(strings.TrimSpace(bucket)) { - // no resource since bucket is empty string - errorResponse := ErrorResponse{ - Code: "InvalidBucketName", - Message: "The specified bucket is not valid.", - RequestID: "minio", - } - return errorResponse - } - return nil -} - -// invalidObjectError invalid object name to errorResponse -func invalidObjectError(object string) error { - if strings.TrimSpace(object) == "" || object == "" { - // no resource since object name is empty - errorResponse := ErrorResponse{ - Code: "NoSuchKey", - Message: "The specified key does not exist.", - RequestID: "minio", - } - return errorResponse - } - return nil -} - -// invalidArgumentError invalid argument to errorResponse -func invalidArgumentError(arg string) error { - errorResponse := ErrorResponse{ - Code: "InvalidArgument", - Message: "Invalid Argument.", - RequestID: "minio", - } - if strings.TrimSpace(arg) == "" || arg == "" { - // no resource since arg is empty string - return errorResponse - } - if !utf8.ValidString(arg) { - // add resource to reply back with invalid string - errorResponse.Resource = arg - return errorResponse - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go index cb9e3e288..0629d0a2d 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go @@ -25,16 +25,22 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname is a dummy value, please replace them with original value. 
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	err = s3Client.BucketExists("mybucket")
+
+	err = s3Client.BucketExists("my-bucketname")
 	if err != nil {
 		log.Fatalln(err)
 	}
+	log.Println("Success")
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go
new file mode 100644
index 000000000..57856a578
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go
@@ -0,0 +1,44 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go"
+)
+
+func main() {
+	// Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Successfully saved my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go
new file mode 100644
index 000000000..5f85b5c07
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go
@@ -0,0 +1,44 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go"
+)
+
+func main() {
+	// Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
index 5b0cec786..202baa3a3 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
@@ -25,14 +25,19 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Note: my-bucketname is a dummy value, please replace it with an actual value.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	acl, err := s3Client.GetBucketACL("mybucket")
+
+	acl, err := s3Client.GetBucketACL("my-bucketname")
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
index 71a6d92a0..041a136c1 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
@@ -27,25 +27,30 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	reader, stat, err := s3Client.GetObject("mybucket", "myobject")
+	// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
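+	// The arguments to New below are: endpoint, access key, secret key, and the
+	// insecure flag.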
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - localfile, err := os.Create("testfile") + reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("my-testfile") if err != nil { log.Fatalln(err) } defer localfile.Close() - if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + if _, err = io.Copy(localfile, reader); err != nil { log.Fatalln(err) } } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go new file mode 100644 index 000000000..db65359ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go @@ -0,0 +1,91 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "errors" + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + defer reader.Close() + + localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + st, err := localFile.Stat() + if err != nil { + log.Fatalln(err) + } + + readAtOffset := st.Size() + readAtBuffer := make([]byte, 5*1024*1024) + + // Loop and write. + for { + readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset) + if rerr != nil { + if rerr != io.EOF { + log.Fatalln(rerr) + } + } + writeSize, werr := localFile.Write(readAtBuffer[:readAtSize]) + if werr != nil { + log.Fatalln(werr) + } + if readAtSize != writeSize { + log.Fatalln(errors.New("Something really bad happened here.")) + } + readAtOffset += int64(writeSize) + if rerr == io.EOF { + break + } + } + + // totalWritten size. + totalWritten := readAtOffset + + // If found mismatch error out. 
+	if totalWritten != stat.Size {
+		log.Fatalln(errors.New("mismatch in total bytes written and object size"))
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
index 8148ba8a2..b5e505ccc 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
@@ -25,17 +25,21 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for bucket := range s3Client.ListBuckets() {
-		if bucket.Err != nil {
-			log.Fatalln(bucket.Err)
-		}
-		log.Println(bucket.Stat)
+
+	buckets, err := s3Client.ListBuckets()
+	if err != nil {
+		log.Fatalln(err)
+	}
+	for _, bucket := range buckets {
+		log.Println(bucket)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
index f73833aca..a4fcc95e8 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
@@ -19,26 +19,38 @@ package main
 
 import (
+	"fmt"
 	"log"
 
 	"github.com/minio/minio-go"
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Note: my-bucketname and my-prefixname are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	// Recursive
-	for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) {
+
+	// Create a done channel to control the 'ListIncompleteUploads' goroutine.
+	doneCh := make(chan struct{})
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// List all multipart uploads from a bucket-name with a matching prefix.
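+	// The loop below reads from the returned channel until the listing completes
+	// and the channel is closed; closing doneCh (deferred above) stops it early.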
+	for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
 		if multipartObject.Err != nil {
-			log.Fatalln(multipartObject.Err)
+			fmt.Println(multipartObject.Err)
+			return
 		}
-		log.Println(multipartObject)
+		fmt.Println(multipartObject)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
index 1908d7224..cd1ad6b7f 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
@@ -19,23 +19,38 @@ package main
 
 import (
+	"fmt"
 	"log"
 
 	"github.com/minio/minio-go"
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Note: my-bucketname and my-prefixname are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for object := range s3Client.ListObjects("mybucket", "", true) {
+
+	// Create a done channel to control the 'ListObjects' goroutine.
+	doneCh := make(chan struct{})
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// List all objects from a bucket-name with a matching prefix.
+	for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
 		if object.Err != nil {
-			log.Fatalln(object.Err)
+			fmt.Println(object.Err)
+			return
 		}
-		log.Println(object.Stat)
+		fmt.Println(object)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
index 1fcfb7151..52bebf1a5 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
@@ -25,14 +25,19 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Note: my-bucketname is a dummy value, please replace it with an actual value.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// The insecure boolean is the last argument for New().
+
+	// New returns a client object backed by an automatically detected signature type
+	// based on the provider.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.MakeBucket("mybucket", "") + + err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go similarity index 56% rename from Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go rename to Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go index b4e2c54b4..2ba878a97 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go @@ -19,33 +19,28 @@ package main import ( - "io" "log" - "os" + "time" "github.com/minio/minio-go" ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) - if err != nil { - log.Fatalln(err) - } - reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10) + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - localfile, err := os.Create("testfile") + presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } - defer localfile.Close() - - if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { - log.Fatalln(err) - } + log.Println(presignedURL) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go new file mode 100644 index 000000000..65fa66ddf --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go @@ -0,0 +1,56 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). 
+ + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + policy := minio.NewPostPolicy() + policy.SetBucket("my-bucketname") + policy.SetKey("my-objectname") + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + m, err := s3Client.PresignedPostPolicy(policy) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("curl ") + for k, v := range m { + fmt.Printf("-F %s=%s ", k, v) + } + fmt.Printf("-F file=@/etc/bashrc ") + fmt.Printf("https://play.minio.io:9002/my-bucketname\n") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go similarity index 56% rename from Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go rename to Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go index 591b4be3a..b55f721f7 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go @@ -19,35 +19,28 @@ package main import ( - "io" "log" - "os" + "time" "github.com/minio/minio-go" ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) - if err != nil { - log.Fatalln(err) - } - reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10) + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - localfile, err := os.Create("testfile") + presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } - defer localfile.Close() - - if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { - log.Fatalln(err) - } + log.Println(presignedURL) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go index 5cf057286..d7efb7b43 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go @@ -26,27 +26,28 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - object, err := os.Open("testfile") + + object, err := os.Open("my-testfile") if err != nil { log.Fatalln(err) } defer object.Close() - objectInfo, err := object.Stat() - if err != nil { - object.Close() - log.Fatalln(err) - } - err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object) + st, _ := object.Stat() + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream") if err != nil { log.Fatalln(err) } - + log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.") } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go new file mode 100644 index 000000000..aff67f8e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go @@ -0,0 +1,56 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + localFile, err := os.Open("testfile") + if err != nil { + log.Fatalln(err) + } + + st, err := localFile.Stat() + if err != nil { + log.Fatalln(err) + } + defer localFile.Close() + + _, err = s3Client.PutObjectPartial("bucket-name", "objectName", localFile, st.Size(), "text/plain") + if err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go index 6004c90e2..1d2d03ba3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go @@ -25,16 +25,19 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname is a dummy value, please replace them with original value. + + // Requests are always secure by default. 
set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveBucket("mybucket") + // This operation will only work if your bucket is empty. + err = s3Client.RemoveBucket("my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go index 4d5b49c1c..458a4c450 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go @@ -25,14 +25,19 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") { + + for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") { if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go index 4447b65ab..2301a77de 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go @@ -25,16 +25,18 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveObject("mybucket", "myobject") + err = s3Client.RemoveObject("my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go index f85d1256a..7893018f7 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go @@ -25,14 +25,19 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname is a dummy value, please replace them with original value. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")) + + err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write")) if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go index bb3844900..8f24460ab 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go @@ -25,14 +25,18 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Note: my-bucketname and my-objectname are dummy values, please replace them with original values. + + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - stat, err := s3Client.StatObject("mybucket", "myobject") + stat, err := s3Client.StatObject("my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go index 7b0b17f82..59b205dab 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go @@ -25,18 +25,23 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. 
Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - err = s3Client.BucketExists("mybucket") + + err = s3Client.BucketExists("my-bucketname") if err != nil { log.Fatalln(err) } + log.Println("Success") } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go new file mode 100644 index 000000000..a936d5a3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go @@ -0,0 +1,45 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname + // and my-filename.csv are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) + if err != nil { + log.Fatalln(err) + } + + if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil { + log.Fatalln(err) + } + log.Println("Successfully saved my-filename.csv") +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go new file mode 100644 index 000000000..f295dd778 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go @@ -0,0 +1,45 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go"
+)
+
+func main() {
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+	// and my-filename.csv are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
index c9fbe78c3..24991df0c 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
@@ -25,16 +25,20 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+	// dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	acl, err := s3Client.GetBucketACL("mybucket")
+
+	acl, err := s3Client.GetBucketACL("my-bucketname")
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
index d0082d90a..0125491ab 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
@@ -27,27 +27,31 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	reader, stat, err := s3Client.GetObject("mybucket", "myobject")
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+	// my-testfile are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - localfile, err := os.Create("testfile") + reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + + localfile, err := os.Create("my-testfile") if err != nil { log.Fatalln(err) } defer localfile.Close() - if _, err = io.CopyN(localfile, reader, stat.Size); err != nil { + if _, err = io.Copy(localfile, reader); err != nil { log.Fatalln(err) } } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go new file mode 100644 index 000000000..2c32c8449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go @@ -0,0 +1,92 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "errors" + "io" + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) + if err != nil { + log.Fatalln(err) + } + + reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + defer reader.Close() + + localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatalln(err) + } + defer localfile.Close() + + st, err := localFile.Stat() + if err != nil { + log.Fatalln(err) + } + + readAtOffset := st.Size() + readAtBuffer := make([]byte, 5*1024*1024) + + // For loop. + for { + readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset) + if rerr != nil { + if rerr != io.EOF { + log.Fatalln(rerr) + } + } + writeSize, werr := localFile.Write(readAtBuffer[:readAtSize]) + if werr != nil { + log.Fatalln(werr) + } + if readAtSize != writeSize { + log.Fatalln(errors.New("Something really bad happened here.")) + } + readAtOffset += int64(writeSize) + if rerr == io.EOF { + break + } + } + + // totalWritten size. + totalWritten := readAtOffset + + // If found mismatch error out. 
+	if totalWritten != stat.Size {
+		log.Fatalln(errors.New("mismatch in total bytes written and object size"))
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
index 5aff5a1a2..1b29ebbcf 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
@@ -25,19 +25,24 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
+	// dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for bucket := range s3Client.ListBuckets() {
-		if bucket.Err != nil {
-			log.Fatalln(bucket.Err)
-		}
-		log.Println(bucket.Stat)
+
+	buckets, err := s3Client.ListBuckets()
+	if err != nil {
+		log.Fatalln(err)
+	}
+	for _, bucket := range buckets {
+		log.Println(bucket)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
index 0ceab2b28..93f91d581 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
@@ -19,26 +19,39 @@ package main
 
 import (
+	"fmt"
 	"log"
 
 	"github.com/minio/minio-go"
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+	// are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	// Recursive
-	for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) {
+
+	// Create a done channel to control the 'ListIncompleteUploads' goroutine.
+	doneCh := make(chan struct{})
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// List all multipart uploads from a bucket-name with a matching prefix.
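+	// The third argument (true) enables recursive listing under the given prefix.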
+	for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
 		if multipartObject.Err != nil {
-			log.Fatalln(multipartObject.Err)
+			fmt.Println(multipartObject.Err)
+			return
 		}
-		log.Println(multipartObject)
+		fmt.Println(multipartObject)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
index a091fbbf4..29b61dc94 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
@@ -19,25 +19,39 @@ package main
 
 import (
-	"log"
+	"fmt"
 
 	"github.com/minio/minio-go"
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+	// are dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
 	if err != nil {
-		log.Fatalln(err)
+		fmt.Println(err)
+		return
 	}
-	for object := range s3Client.ListObjects("mybucket", "", true) {
+
+	// Create a done channel to control the 'ListObjects' goroutine.
+	doneCh := make(chan struct{})
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// List all objects from a bucket-name with a matching prefix.
+	for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
 		if object.Err != nil {
-			log.Fatalln(object.Err)
+			fmt.Println(object.Err)
+			return
 		}
-		log.Println(object.Stat)
+		fmt.Println(object)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
index 5b97ca128..22f9e18f2 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
@@ -25,16 +25,20 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://s3.amazonaws.com",
-	}
-	s3Client, err := minio.New(config)
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+	// dummy values, please replace them with actual values.
+
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().
+
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - err = s3Client.MakeBucket("mybucket", "") + + err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go index fc96bb002..08929cdc0 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go @@ -26,18 +26,22 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - string, err := s3Client.PresignedGetObject("mybucket", "myobject", time.Duration(1000)*time.Second) + + presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } - log.Println(string) + log.Println(presignedURL) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go index c41cae461..eb73e88e8 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go @@ -27,28 +27,31 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } + policy := minio.NewPostPolicy() - policy.SetKey("myobject") - policy.SetBucket("mybucket") + policy.SetBucket("my-bucketname") + policy.SetKey("my-objectname") policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days m, err := s3Client.PresignedPostPolicy(policy) if err != nil { - fmt.Println(err) - return + log.Fatalln(err) } fmt.Printf("curl ") for k, v := range m { fmt.Printf("-F %s=%s ", k, v) } - fmt.Printf("-F file=@/etc/bashrc ") - fmt.Printf(config.Endpoint + "/mybucket\n") + fmt.Printf("-F file=@/etc/bash.bashrc ") + fmt.Printf("https://my-bucketname.s3.amazonaws.com\n") } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go index 7675cabb8..96d243c7e 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go @@ -26,18 +26,22 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - string, err := s3Client.PresignedPutObject("mybucket", "myobject", time.Duration(1000)*time.Second) + + presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } - log.Println(string) + log.Println(presignedURL) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go index b67832b7f..963060487 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go @@ -26,29 +26,29 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - object, err := os.Open("testfile") + + object, err := os.Open("my-testfile") if err != nil { log.Fatalln(err) } defer object.Close() - objectInfo, err := object.Stat() - if err != nil { - object.Close() - log.Fatalln(err) - } - err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object) + st, _ := object.Stat() + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream") if err != nil { log.Fatalln(err) } - + log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.") } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go new file mode 100644 index 000000000..e59b2ad4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go @@ -0,0 +1,57 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "os" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) + if err != nil { + log.Fatalln(err) + } + + localFile, err := os.Open("my-testfile") + if err != nil { + log.Fatalln(err) + } + + st, err := localFile.Stat() + if err != nil { + log.Fatalln(err) + } + defer localFile.Close() + + _, err = s3Client.PutObjectPartial("my-bucketname", "my-objectname", localFile, st.Size(), "text/plain") + if err != nil { + log.Fatalln(err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go index 65f9e16d9..d22d18bea 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go @@ -25,16 +25,21 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. 
+ + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveBucket("mybucket") + + // This operation will only work if your bucket is empty. + err = s3Client.RemoveBucket("my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go index cb78304d3..8b7533472 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go @@ -25,16 +25,20 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") { + + for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") { if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go index 07761ebd9..c1b08458f 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go @@ -25,16 +25,19 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveObject("mybucket", "myobject") + err = s3Client.RemoveObject("my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go index dfe3af630..59fb10ef7 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go @@ -25,16 +25,20 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write")) + + err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write")) if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go index 400670f19..1eb6c604f 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go @@ -25,16 +25,19 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://s3.amazonaws.com", - } - s3Client, err := minio.New(config) + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value.
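// A minimal sketch of consuming the stat result fetched below, assuming the
// ObjectStat fields (Key, Size, LastModified) declared in the vendored
// definitions.go:
//
//	log.Printf("%s: %d bytes, last modified %v", stat.Key, stat.Size, stat.LastModified)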
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) if err != nil { log.Fatalln(err) } - stat, err := s3Client.StatObject("mybucket", "myobject") + stat, err := s3Client.StatObject("my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/io.go b/Godeps/_workspace/src/github.com/minio/minio-go/io.go deleted file mode 100644 index 71b4363a8..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/io.go +++ /dev/null @@ -1,67 +0,0 @@ -package minio - -import "io" - -// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads up to len(p) bytes into p. It returns the number of bytes -// read (0 <= n <= len(p)) and any error encountered. Even if Read -// returns n < len(p), it may use all of p as scratch space during the call. -// If some data is available but not len(p) bytes, Read conventionally -// returns what is available instead of waiting for more. -// -// When Read encounters an error or end-of-file condition after -// successfully reading n > 0 bytes, it returns the number of -// bytes read. It may return the (non-nil) error from the same call -// or return the error (and n == 0) from a subsequent call. -// An instance of this general case is that a Reader returning -// a non-zero number of bytes at the end of the input stream may -// return either err == EOF or err == nil. The next Read should -// return 0, EOF. -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read or Write to offset, -// interpreted according to whence: 0 means relative to the start of -// the file, 1 means relative to the current offset, and 2 means -// relative to the end. Seek returns the new offset relative to the -// start of the file and an error, if any. -// -// Seeking to an offset before the start of the file is an error. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// Close closes the ReaderSeekerCloser. -// -// The behavior of Close after the first call is undefined. -// Specific implementations may document their own behavior. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go index a1637545a..2d3082755 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go @@ -8,145 +8,177 @@ import ( "time" ) -// expirationDateFormat date format for expiration key in json policy +// expirationDateFormat date format for expiration key in json policy. 
const expirationDateFormat = "2006-01-02T15:04:05.999Z" -// Policy explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html -type policy struct { +// policyCondition explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { matchType string - key string + condition string value string } // PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string. type PostPolicy struct { - expiration time.Time // expiration date and time of the POST policy. - policies []policy + expiration time.Time // expiration date and time of the POST policy. + conditions []policyCondition // collection of different policy conditions. + // contentLengthRange minimum and maximum allowable size for the uploaded content. contentLengthRange struct { - min int - max int + min int64 + max int64 } - // Post form data + // Post form data. formData map[string]string } -// NewPostPolicy instantiate new post policy +// NewPostPolicy instantiate new post policy. func NewPostPolicy() *PostPolicy { p := &PostPolicy{} - p.policies = make([]policy, 0) + p.conditions = make([]policyCondition, 0) p.formData = make(map[string]string) return p } -// SetExpires expiration time +// SetExpires expiration time. func (p *PostPolicy) SetExpires(t time.Time) error { if t.IsZero() { - return errors.New("time input invalid") + return errors.New("No expiry time set.") } p.expiration = t return nil } -// SetKey Object name +// SetKey Object name. func (p *PostPolicy) SetKey(key string) error { if strings.TrimSpace(key) == "" || key == "" { - return errors.New("key invalid") + return errors.New("Object name is not specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err } - policy := policy{"eq", "$key", key} - p.policies = append(p.policies, policy) p.formData["key"] = key return nil } -// SetKeyStartsWith Object name that can start with +// SetKeyStartsWith Object name that can start with. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errors.New("key-starts-with invalid") + return errors.New("Object prefix is not specified.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err } - policy := policy{"starts-with", "$key", keyStartsWith} - p.policies = append(p.policies, policy) p.formData["key"] = keyStartsWith return nil } -// SetBucket bucket name -func (p *PostPolicy) SetBucket(bucket string) error { - if strings.TrimSpace(bucket) == "" || bucket == "" { - return errors.New("bucket invalid") +// SetBucket bucket name. 
+func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return errors.New("Bucket name is not specified.") } - policy := policy{"eq", "$bucket", bucket} - p.policies = append(p.policies, policy) - p.formData["bucket"] = bucket + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName return nil } -// SetContentType content-type +// SetContentType content-type. func (p *PostPolicy) SetContentType(contentType string) error { if strings.TrimSpace(contentType) == "" || contentType == "" { - return errors.New("contentType invalid") + return errors.New("No content type specified.") } - policy := policy{"eq", "$Content-Type", contentType} - if err := p.addNewPolicy(policy); err != nil { + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { return err } p.formData["Content-Type"] = contentType return nil } -// SetContentLength - set new min and max content legnth condition -func (p *PostPolicy) SetContentLength(min, max int) error { +// SetContentLengthRange - set new min and max content length condition. +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { if min > max { - return errors.New("minimum cannot be bigger than maximum") + return errors.New("minimum limit is larger than maximum limit") } if min < 0 { - return errors.New("minimum cannot be negative") + return errors.New("minimum limit cannot be negative") } if max < 0 { - return errors.New("maximum cannot be negative") + return errors.New("maximum limit cannot be negative") } p.contentLengthRange.min = min p.contentLengthRange.max = max return nil } -// addNewPolicy - internal helper to validate adding new policies -func (p *PostPolicy) addNewPolicy(po policy) error { - if po.matchType == "" || po.key == "" || po.value == "" { - return errors.New("policy invalid") +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return errors.New("Policy fields empty.") } - p.policies = append(p.policies, po) + p.conditions = append(p.conditions, policyCond) return nil } -// Stringer interface for printing in pretty manner +// Stringer interface for printing in pretty manner. func (p PostPolicy) String() string { return string(p.marshalJSON()) } -// marshalJSON provides Marshalled JSON +// marshalJSON provides Marshalled JSON. 
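// For example, a policy built with SetBucket("my-bucketname"),
// SetKey("my-objectname") and an expiry marshals to the following JSON
// (timestamp is illustrative):
//
//	{"expiration":"2015-11-16T15:04:05.999Z","conditions":[["eq","$bucket","my-bucketname"],["eq","$key","my-objectname"]]}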
func (p PostPolicy) marshalJSON() []byte { expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` - var policiesStr string - policies := []string{} - for _, po := range p.policies { - policies = append(policies, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.key, po.value)) + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) } if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { - policies = append(policies, fmt.Sprintf("[\"content-length-range\", %d, %d]", + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", p.contentLengthRange.min, p.contentLengthRange.max)) } - if len(policies) > 0 { - policiesStr = `"conditions":[` + strings.Join(policies, ",") + "]" + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" } retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + policiesStr + retStr = retStr + conditionsStr retStr = retStr + "}" return []byte(retStr) } -// base64 produces base64 of PostPolicy's Marshalled json +// base64 produces base64 of PostPolicy's Marshalled json. func (p PostPolicy) base64() string { return base64.StdEncoding.EncodeToString(p.marshalJSON()) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go deleted file mode 100644 index c63c16a13..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go +++ /dev/null @@ -1,283 +0,0 @@ -package minio - -import ( - "encoding/hex" - "io" - "io/ioutil" - "net/http" - "regexp" - "strings" - "unicode/utf8" -) - -// operation - rest operation -type operation struct { - HTTPServer string - HTTPMethod string - HTTPPath string -} - -// request - a http request -type request struct { - req *http.Request - config *Config - body io.ReadSeeker - expires int64 -} - -// Do - start the request -func (r *request) Do() (resp *http.Response, err error) { - if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" { - if r.config.Signature.isV2() { - r.SignV2() - } - if r.config.Signature.isV4() || r.config.Signature.isLatest() { - r.SignV4() - } - } - transport := http.DefaultTransport - if r.config.Transport != nil { - transport = r.config.Transport - } - // do not use http.Client{}, while it may seem intuitive but the problem seems to be - // that http.Client{} internally follows redirects and there is no easier way to disable - // it from outside using a configuration parameter - - // this auto redirect causes complications in verifying subsequent errors - // - // The best is to use RoundTrip() directly, so the request comes back to the caller where - // we are going to handle such replies. And indeed that is the right thing to do here. 
- // - return transport.RoundTrip(r.req) -} - -// Set - set additional headers if any -func (r *request) Set(key, value string) { - r.req.Header.Set(key, value) -} - -// Get - get header values -func (r *request) Get(key string) string { - return r.req.Header.Get(key) -} - -func path2BucketAndObject(path string) (bucketName, objectName string) { - pathSplits := strings.SplitN(path, "?", 2) - splits := strings.SplitN(pathSplits[0], separator, 3) - switch len(splits) { - case 0, 1: - bucketName = "" - objectName = "" - case 2: - bucketName = splits[1] - objectName = "" - case 3: - bucketName = splits[1] - objectName = splits[2] - } - return bucketName, objectName -} - -// path2Object gives objectName from URL path -func path2Object(path string) (objectName string) { - _, objectName = path2BucketAndObject(path) - return -} - -// path2Bucket gives bucketName from URL path -func path2Bucket(path string) (bucketName string) { - bucketName, _ = path2BucketAndObject(path) - return -} - -// path2Query gives query part from URL path -func path2Query(path string) (query string) { - pathSplits := strings.SplitN(path, "?", 2) - if len(pathSplits) > 1 { - query = pathSplits[1] - } - return -} - -// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func getURLEncodedPath(pathName string) string { - // if object matches reserved string, no need to encode them - reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - if reservedNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -func (op *operation) getRequestURL(config Config) (url string) { - // parse URL for the combination of HTTPServer + HTTPPath - url = op.HTTPServer + separator - if !config.isVirtualStyle { - url += path2Bucket(op.HTTPPath) - } - objectName := getURLEncodedPath(path2Object(op.HTTPPath)) - queryPath := path2Query(op.HTTPPath) - if objectName == "" && queryPath != "" { - url += "?" + queryPath - return - } - if objectName != "" && queryPath == "" { - if strings.HasSuffix(url, separator) { - url += objectName - } else { - url += separator + objectName - } - return - } - if objectName != "" && queryPath != "" { - if strings.HasSuffix(url, separator) { - url += objectName + "?" + queryPath - } else { - url += separator + objectName + "?" 
+ queryPath - } - } - return -} - -func newPresignedRequest(op *operation, config *Config, expires int64) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // save for subsequent use - r := new(request) - r.config = config - r.expires = expires - r.req = req - r.body = nil - - return r, nil -} - -// newUnauthenticatedRequest - instantiate a new unauthenticated request -func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // add body - switch { - case body == nil: - req.Body = nil - default: - req.Body = ioutil.NopCloser(body) - } - - // save for subsequent use - r := new(request) - r.req = req - r.config = config - - return r, nil -} - -// newRequest - instantiate a new request -func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) { - // if no method default to POST - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - - // get a new HTTP request, for the requested method - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent - req.Header.Set("User-Agent", config.userAgent) - - // set Accept header for response encoding style, if available - if config.AcceptType != "" { - req.Header.Set("Accept", config.AcceptType) - } - - // add body - switch { - case body == nil: - req.Body = nil - default: - req.Body = ioutil.NopCloser(body) - } - - // save for subsequent use - r := new(request) - r.config = config - r.req = req - r.body = body - - return r, nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go similarity index 52% rename from Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go rename to Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go index aac4066b6..956b04f23 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Legacy v2 Signature Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
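The hunks below rewrite the v2 signing methods as free functions that take an http.Request plus explicit credentials instead of hanging off the old request type. A minimal usage sketch under the new shape (bucket, object and keys are placeholders), presigning a GET for one hour:

	req, _ := http.NewRequest("GET", "https://my-bucketname.s3.amazonaws.com/my-objectname", nil)
	presignedReq := minio.PreSignV2(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", 3600)
	fmt.Println(presignedReq.URL.String())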
@@ -21,7 +21,6 @@ import ( "crypto/hmac" "crypto/sha1" "encoding/base64" - "errors" "fmt" "net/http" "net/url" @@ -31,45 +30,77 @@ import ( "time" ) -// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} -func (r *request) PreSignV2() (string, error) { - if r.config.AccessKeyID == "" || r.config.SecretAccessKey == "" { - return "", errors.New("presign requires accesskey and secretkey") - } - // Add date if not present - d := time.Now().UTC() - if date := r.Get("Date"); date == "" { - r.Set("Date", d.Format(http.TimeFormat)) - } - epochExpires := d.Unix() + r.expires - var path string - if r.config.isVirtualStyle { - for k, v := range regions { - if v == r.config.Region { - path = "/" + strings.TrimSuffix(r.req.URL.Host, "."+k) - path += r.req.URL.Path - path = getURLEncodedPath(path) - break - } - } - } else { - path = getURLEncodedPath(r.req.URL.Path) - } - signText := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path) - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) - hm.Write([]byte(signText)) +// signature and API related constants. +const ( + signV2Algorithm = "AWS" +) - query := r.req.URL.Query() - query.Set("AWSAccessKeyId", r.config.AccessKeyID) - query.Set("Expires", strconv.FormatInt(epochExpires, 10)) - query.Set("Signature", base64.StdEncoding.EncodeToString(hm.Sum(nil))) - r.req.URL.RawQuery = query.Encode() - - return r.req.URL.String(), nil +// Encode input URL path to URL encoded path. +func encodeURL2Path(u *url.URL) (path string) { + // Encode URL path. + if strings.HasSuffix(u.Host, ".s3.amazonaws.com") { + path = "/" + strings.TrimSuffix(u.Host, ".s3.amazonaws.com") + path += u.Path + path = urlEncodePath(path) + return + } + if strings.HasSuffix(u.Host, ".storage.googleapis.com") { + path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") + path += u.Path + path = urlEncodePath(path) + return + } + path = urlEncodePath(u.Path) + return } -func (r *request) PostPresignSignatureV2(policyBase64 string) string { - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { + // presign is a noop for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return nil + } + d := time.Now().UTC() + // Add date if not present + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Get encoded URL path. + path := encodeURL2Path(req.URL) + + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // get string to sign. + stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(req.URL.Host, ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires and Signature for presigned query. 
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + query.Set("Signature", signature) + + // Encode query and save. + req.URL.RawQuery = query.Encode() + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy request +func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(policyBase64)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) return signature @@ -91,25 +122,32 @@ func (r *request) PostPresignSignatureV2(policyBase64 string) string { // // CanonicalizedProtocolHeaders = -// SignV2 the request before Do() (version 2.0) -func (r *request) SignV2() { - // Add date if not present - if date := r.Get("Date"); date == "" { - r.Set("Date", time.Now().UTC().Format(http.TimeFormat)) - } - // Calculate HMAC for secretAccessKey - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) - hm.Write([]byte(r.getStringToSignV2())) +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { + // Initial time. + d := time.Now().UTC() - // prepare auth header + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := getStringToSignV2(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("AWS %s:", r.config.AccessKeyID)) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) encoder := base64.NewEncoder(base64.StdEncoding, authHeader) encoder.Write(hm.Sum(nil)) encoder.Close() - // Set Authorization header - r.req.Header.Set("Authorization", authHeader.String()) + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req } // From the Amazon docs: @@ -120,32 +158,34 @@ func (r *request) SignV2() { // Date + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func (r *request) getStringToSignV2() string { +func getStringToSignV2(req http.Request) string { buf := new(bytes.Buffer) - // write standard headers - r.writeDefaultHeaders(buf) - // write canonicalized protocol headers if any - r.writeCanonicalizedHeaders(buf) - // write canonicalized Query resources if any - r.writeCanonicalizedResource(buf) + // write standard headers. + writeDefaultHeaders(buf, req) + // write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req) return buf.String() } -func (r *request) writeDefaultHeaders(buf *bytes.Buffer) { - buf.WriteString(r.req.Method) +// writeDefaultHeaders - write all default necessary headers +func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method) buf.WriteByte('\n') - buf.WriteString(r.req.Header.Get("Content-MD5")) + buf.WriteString(req.Header.Get("Content-MD5")) buf.WriteByte('\n') - buf.WriteString(r.req.Header.Get("Content-Type")) + buf.WriteString(req.Header.Get("Content-Type")) buf.WriteByte('\n') - buf.WriteString(r.req.Header.Get("Date")) + buf.WriteString(req.Header.Get("Date")) buf.WriteByte('\n') } -func (r *request) writeCanonicalizedHeaders(buf *bytes.Buffer) { +// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { var protoHeaders []string vals := make(map[string][]string) - for k, vv := range r.req.Header { + for k, vv := range req.Header { // all the AMZ and GOOG headers should be lowercase lk := strings.ToLower(k) if strings.HasPrefix(lk, "x-amz") { @@ -205,25 +245,18 @@ var resourceList = []string{ // CanonicalizedResource = [ "/" + Bucket ] + // + // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func (r *request) writeCanonicalizedResource(buf *bytes.Buffer) error { - requestURL := r.req.URL - if r.config.isVirtualStyle { - for k, v := range regions { - if v == r.config.Region { - path := "/" + strings.TrimSuffix(requestURL.Host, "."+k) - path += requestURL.Path - buf.WriteString(getURLEncodedPath(path)) - break - } - } - } else { - buf.WriteString(getURLEncodedPath(requestURL.Path)) - } +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error { + requestURL := req.URL + + // Get encoded URL path. + path := encodeURL2Path(requestURL) + buf.WriteString(path) + sort.Strings(resourceList) if requestURL.RawQuery != "" { var n int vals, _ := url.ParseQuery(requestURL.RawQuery) - // loop through all the supported resourceList + // loop through all the supported resourceList. for _, resource := range resourceList { if vv, ok := vals[resource]; ok && len(vv) > 0 { n++ diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go new file mode 100644 index 000000000..515d8ab18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go @@ -0,0 +1,282 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" +) + +// signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes problems with generating pre-signed URLs +/// (that are executed by other agents) or when customers pass requests through proxies, which may +/// modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed URL should not provide a content-length +/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when +/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which +/// implicitly validates the payload length (since changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. 
+/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in browser environments, where browsers +/// like to modify and normalize the content-type header in different ways. There is more information +/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic +/// and reduces the possibility of future bugs +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature +func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a service +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCredential generate a credential string +func getCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = "UNSIGNED-PAYLOAD" + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for signature. +func getCanonicalHeaders(req http.Request) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + return buf.String() +} + +// getSignedHeaders generate all signed request headers. +// i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func getSignedHeaders(req http.Request) string { + var headers []string + for k := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + headers = append(headers, "host") + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalRequest generate a canonical request of style. 
+// +// canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// +// +func getCanonicalRequest(req http.Request) string { + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + canonicalRequest := strings.Join([]string{ + req.Method, + urlEncodePath(req.URL.Path), + req.URL.RawQuery, + getCanonicalHeaders(req), + getSignedHeaders(req), + getHashedPayload(req), + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values. +func getStringToSignV4(t time.Time, location, canonicalRequest string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" + stringToSign = stringToSign + getScope(location, t) + "\n" + stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + return stringToSign +} + +// PreSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { + // presign is a noop for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return nil + } + // Initial time. + t := time.Now().UTC() + + // get credential string. + credential := getCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req) + + // set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", signedHeaders) + query.Set("X-Amz-Credential", credential) + req.URL.RawQuery = query.Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // calculate signature. + signature := getSignature(signingKey, stringToSign) + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + signature + + return &req +} + +// PostPresignSignatureV4 - presigned signature for PostPolicy requests. +func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + signingkey := getSigningKey(secretAccessKey, location, t) + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // get credential string. + credential := getCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req) + + // calculate signature. + signature := getSignature(signingKey, stringToSign) + + // if regular request, construct the final authorization header. 
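	// For illustration (placeholder values), the header assembled below takes
	// the documented SigV4 form:
	//
	//	Authorization: AWS4-HMAC-SHA256 Credential=YOUR-ACCESSKEYID/20151106/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=<hex-encoded HMAC-SHA256>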
+ parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return &req +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go deleted file mode 100644 index 09ef06a9a..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "encoding/hex" - "errors" - "net/http" - "sort" - "strconv" - "strings" - "time" -) - -const ( - authHeader = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" - yyyymmdd = "20060102" -) - -/// -/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes problems with generating pre-signed URLs -/// (that are executed by other agents) or when customers pass requests through proxies, which may -/// modify the user-agent. -/// -/// Content-Length: -/// -/// This is ignored from signing because generating a pre-signed URL should not provide a content-length -/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when -/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which -/// implicitly validates the payload length (since changing the number of bytes would change the checksum) -/// and therefore this header is not valuable in the signature. -/// -/// Content-Type: -/// -/// Signing this header causes quite a number of problems in browser environments, where browsers -/// like to modify and normalize the content-type header in different ways. There is more information -/// on this in https://github.com/aws/aws-sdk-js/issues/244. 
Avoiding this field simplifies logic -/// and reduces the possibility of future bugs -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// -var ignoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload -func (r *request) getHashedPayload() string { - hash := func() string { - switch { - case r.expires != 0: - return "UNSIGNED-PAYLOAD" - case r.body == nil: - return hex.EncodeToString(sum256([]byte{})) - default: - sum256Bytes, _ := sum256Reader(r.body) - return hex.EncodeToString(sum256Bytes) - } - } - hashedPayload := hash() - if hashedPayload != "UNSIGNED-PAYLOAD" { - r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload) - } - return hashedPayload -} - -// getCanonicalHeaders generate a list of request headers with their values -func (r *request) getCanonicalHeaders() string { - var headers []string - vals := make(map[string][]string) - for k, vv := range r.req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - vals[strings.ToLower(k)] = vv - } - headers = append(headers, "host") - sort.Strings(headers) - - var buf bytes.Buffer - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - switch { - case k == "host": - buf.WriteString(r.req.URL.Host) - fallthrough - default: - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(v) - } - buf.WriteByte('\n') - } - } - return buf.String() -} - -// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names -func (r *request) getSignedHeaders() string { - var headers []string - for k := range r.req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - } - headers = append(headers, "host") - sort.Strings(headers) - return strings.Join(headers, ";") -} - -// getCanonicalRequest generate a canonical request of style -// -// canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// -// -func (r *request) getCanonicalRequest(hashedPayload string) string { - r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) - canonicalRequest := strings.Join([]string{ - r.req.Method, - getURLEncodedPath(r.req.URL.Path), - r.req.URL.RawQuery, - r.getCanonicalHeaders(), - r.getSignedHeaders(), - hashedPayload, - }, "\n") - return canonicalRequest -} - -// getStringToSign a string based on selected query values -func (r *request) getStringToSignV4(canonicalRequest string, t time.Time) string { - stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(r.config.Region, t) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) - return stringToSign -} - -// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html -func (r *request) PreSignV4() (string, error) { - if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" { - return "", errors.New("presign requires accesskey and secretkey") - } - r.SignV4() - return r.req.URL.String(), nil -} - -func (r *request) PostPresignSignatureV4(policyBase64 string, t time.Time) string { - signingkey := getSigningKey(r.config.SecretAccessKey, 
r.config.Region, t) - signature := getSignature(signingkey, policyBase64) - return signature -} - -// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -func (r *request) SignV4() { - query := r.req.URL.Query() - if r.expires != 0 { - query.Set("X-Amz-Algorithm", authHeader) - } - t := time.Now().UTC() - // Add date if not present - if r.expires != 0 { - query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - query.Set("X-Amz-Expires", strconv.FormatInt(r.expires, 10)) - } else { - r.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - } - - hashedPayload := r.getHashedPayload() - signedHeaders := r.getSignedHeaders() - if r.expires != 0 { - query.Set("X-Amz-SignedHeaders", signedHeaders) - } - credential := getCredential(r.config.AccessKeyID, r.config.Region, t) - if r.expires != 0 { - query.Set("X-Amz-Credential", credential) - r.req.URL.RawQuery = query.Encode() - } - canonicalRequest := r.getCanonicalRequest(hashedPayload) - stringToSign := r.getStringToSignV4(canonicalRequest, t) - signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t) - signature := getSignature(signingKey, stringToSign) - - if r.expires != 0 { - r.req.URL.RawQuery += "&X-Amz-Signature=" + signature - } else { - // final Authorization header - parts := []string{ - authHeader + " Credential=" + credential, - "SignedHeaders=" + signedHeaders, - "Signature=" + signature, - } - auth := strings.Join(parts, ", ") - r.Set("Authorization", auth) - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go new file mode 100644 index 000000000..8eec3f0eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go @@ -0,0 +1,21 @@ +package minio + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is Latest i.e SignatureV4. +const ( + Latest SignatureType = iota + SignatureV4 + SignatureV2 +) + +// isV2 - is signature SignatureV2? +func (s SignatureType) isV2() bool { + return s == SignatureV2 +} + +// isV4 - is signature SignatureV4? +func (s SignatureType) isV4() bool { + return s == SignatureV4 || s == Latest +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go new file mode 100644 index 000000000..34508569f --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go @@ -0,0 +1,76 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +// tempFile - temporary file container. +type tempFile struct { + *os.File + mutex *sync.Mutex +} + +// newTempFile returns a new temporary file, once closed it automatically deletes itself. 
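// A usage sketch (the caller and the prefix here are hypothetical):
//
//	tmp, err := newTempFile("multiparts$")
//	if err != nil {
//		return err
//	}
//	defer tmp.Close() // Close below both closes and removes the file.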
+func newTempFile(prefix string) (*tempFile, error) { + // use platform specific temp directory. + file, err := ioutil.TempFile(os.TempDir(), prefix) + if err != nil { + return nil, err + } + return &tempFile{ + File: file, + mutex: new(sync.Mutex), + }, nil +} + +// cleanupStaleTempFiles - cleanup any stale files present in temp directory at a prefix. +func cleanupStaleTempfiles(prefix string) error { + globPath := filepath.Join(os.TempDir(), prefix) + "*" + staleFiles, err := filepath.Glob(globPath) + if err != nil { + return err + } + for _, staleFile := range staleFiles { + if err := os.Remove(staleFile); err != nil { + return err + } + } + return nil +} + +// Close - closer wrapper to close and remove temporary file. +func (t *tempFile) Close() error { + t.mutex.Lock() + defer t.mutex.Unlock() + if t.File != nil { + // Close the file. + if err := t.File.Close(); err != nil { + return err + } + // Remove file. + if err := os.Remove(t.File.Name()); err != nil { + return err + } + t.File = nil + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/utils.go b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go new file mode 100644 index 000000000..2e2532b6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go @@ -0,0 +1,319 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/hex" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" +) + +// isPartUploaded - true if part is already uploaded. +func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) { + _, isUploaded = objectParts[objPart.PartNumber] + if isUploaded { + isUploaded = (objPart.ETag == objectParts[objPart.PartNumber].ETag) + } + return +} + +// getEndpointURL - construct a new endpoint. +func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) { + if strings.Contains(endpoint, ":") { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return nil, err + } + if !isValidIP(host) && !isValidDomain(host) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } else { + if !isValidIP(endpoint) && !isValidDomain(endpoint) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } + // if inSecure is true, use 'http' scheme. + scheme := "https" + if inSecure { + scheme = "http" + } + + // Construct a secured endpoint URL. + endpointURLStr := scheme + "://" + endpoint + endpointURL, err := url.Parse(endpointURLStr) + if err != nil { + return nil, err + } + + // Validate incoming endpoint URL. + if err := isValidEndpointURL(endpointURL); err != nil { + return nil, err + } + return endpointURL, nil +} + +// isValidDomain validates if input string is a valid domain name. 
+func isValidDomain(host string) bool { + // See RFC 1035, RFC 3696. + host = strings.TrimSpace(host) + if len(host) == 0 || len(host) > 255 { + return false + } + // host cannot start or end with "-" + if host[len(host)-1:] == "-" || host[:1] == "-" { + return false + } + // host cannot start or end with "_" + if host[len(host)-1:] == "_" || host[:1] == "_" { + return false + } + // host cannot start or end with a "." + if host[len(host)-1:] == "." || host[:1] == "." { + return false + } + // All non alphanumeric characters are invalid. + if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 604800 { + return ErrInvalidArgument("Expires cannot be greater than 7 days.") + } + return nil +} + +/// Excerpts from - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html +/// When using virtual hosted–style buckets with SSL, the SSL wild card +/// certificate only matches buckets that do not contain periods. +/// To work around this, use HTTP or write your own certificate verification logic. + +// We decided to not support bucketNames with '.' in them. +var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`) + +// isValidBucketName - verify bucket name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func isValidBucketName(bucketName string) error { + if strings.TrimSpace(bucketName) == "" { + return ErrInvalidBucketName("Bucket name cannot be empty.") + } + if len(bucketName) < 3 { + return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.") + } + if len(bucketName) > 63 { + return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.") + } + if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' { + return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.") + } + if !validBucketName.MatchString(bucketName) { + return ErrInvalidBucketName("Bucket name contains invalid characters.") + } + return nil +} + +// isValidObjectName - verify object name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func isValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return ErrInvalidObjectName("Object name cannot be empty.") + } + if len(objectName) > 1024 { + return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.") + } + if !utf8.ValidString(objectName) { + return ErrInvalidObjectName("Object name with non UTF-8 strings are not supported.") + } + return nil +} + +// isValidObjectPrefix - verify if object prefix is valid. +func isValidObjectPrefix(objectPrefix string) error { + if len(objectPrefix) > 1024 { + return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.") + } + if !utf8.ValidString(objectPrefix) { + return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.") + } + return nil +} + +// optimalPartSize - calculate the optimal part size for the given objectSize. +// +// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible +// object storage it will have the following parameters as constants. +// +// maxParts - 10000 +// minimumPartSize - 5MiB +// maximumPartSize - 5GiB +// +// if the partSize after division with maxParts is greater than minimumPartSize +// then choose it as the new part size, if not return minimumPartSize. +// +// Special cases +// +// - if input object size is -1 then return maxPartSize.
+
+// urlEncodePath encodes a path from its UTF-8 byte representation into percent-escaped sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support
+// UTF-8 properly; non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique
+// and supports pretty much every UTF-8 character.
+func urlEncodePath(pathName string) string {
+	// if object matches reserved string, no need to encode them
+	reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+	if reservedNames.MatchString(pathName) {
+		return pathName
+	}
+	var encodedPathname string
+	for _, s := range pathName {
+		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		}
+		switch s {
+		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		default:
+			runeLen := utf8.RuneLen(s)
+			if runeLen < 0 {
+				// if utf8 cannot convert, return the same string as is
+				return pathName
+			}
+			u := make([]byte, runeLen)
+			utf8.EncodeRune(u, s)
+			for _, r := range u {
+				hexStr := hex.EncodeToString([]byte{r})
+				encodedPathname = encodedPathname + "%" + strings.ToUpper(hexStr)
+			}
+		}
+	}
+	return encodedPathname
+}

From 2c15597e249bdce6c55a5957ebbacd4f53bd4e81 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Tue, 29 Dec 2015 00:26:29 +0100
Subject: [PATCH 31/55] walker: print errors

---
 walk.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/walk.go b/walk.go
index 5cceb98b0..699a60071 100644
--- a/walk.go
+++ b/walk.go
@@ -1,6 +1,8 @@
 package restic
 
 import (
+	"fmt"
+	"os"
 	"path/filepath"
 	"sync"
 
@@ -66,6 +68,8 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) {
 	debug.Log("TreeWalker.walk", "start on %q", path)
 	defer debug.Log("TreeWalker.walk", "done for %q", path)
 
+	debug.Log("TreeWalker.walk", "tree %#v", tree)
+
 	// load all subtrees in parallel
 	results := make([]<-chan loadTreeResult, len(tree.Nodes))
 	for i, node := range tree.Nodes {
@@ -90,7 +94,11 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) {
 		}
 
 		res := <-results[i]
-		tw.walk(p, res.tree, done)
+		if res.err == nil {
+			tw.walk(p, res.tree, done)
+		} else {
+			fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err)
+		}
 
 		job = WalkTreeJob{Path: p, Tree: res.tree, Error: res.err}
 	} else {

From 407819e5a93b344b79b6fc8bc429b79cf701555c Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Tue, 29 Dec 2015 00:27:29 +0100
Subject: [PATCH 32/55] s3: properly integrate minio-go lib

---
 backend/s3/config.go      | 18 +++++-----
 backend/s3/config_test.go |  4 +--
 backend/s3/s3.go          | 74 +++++++++++++++++++++++++--------------
 backend/s3_test.go        | 22 ++++++++----
 location/location_test.go |  6 ++++
 5 files changed, 81 insertions(+), 43 deletions(-)

diff --git a/backend/s3/config.go
b/backend/s3/config.go index cd4d77b4f..b0224925a 100644 --- a/backend/s3/config.go +++ b/backend/s3/config.go @@ -9,8 +9,8 @@ import ( // Config contains all configuration necessary to connect to an s3 compatible // server. type Config struct { - Region string - URL string + Endpoint string + UseHTTP bool KeyID, Secret string Bucket string } @@ -28,8 +28,8 @@ func ParseConfig(s string) (interface{}, error) { } cfg := Config{ - Region: data[0], - Bucket: data[1], + Endpoint: data[0], + Bucket: data[1], } return cfg, nil @@ -55,7 +55,7 @@ func ParseConfig(s string) (interface{}, error) { if len(rest) == 2 { // assume that just a region name and a bucket has been specified, in // the format region/bucket - cfg.Region = rest[0] + cfg.Endpoint = rest[0] cfg.Bucket = rest[1] } else { // assume that a URL has been specified, parse it and use the path as @@ -69,10 +69,12 @@ func ParseConfig(s string) (interface{}, error) { return nil, errors.New("s3: bucket name not found") } - cfg.Bucket = url.Path[1:] - url.Path = "" + cfg.Endpoint = url.Host + if url.Scheme == "http" { + cfg.UseHTTP = true + } - cfg.URL = url.String() + cfg.Bucket = url.Path[1:] } return cfg, nil diff --git a/backend/s3/config_test.go b/backend/s3/config_test.go index ca71a589f..6b3962c94 100644 --- a/backend/s3/config_test.go +++ b/backend/s3/config_test.go @@ -7,11 +7,11 @@ var configTests = []struct { cfg Config }{ {"s3://eu-central-1/bucketname", Config{ - Region: "eu-central-1", + URL: "eu-central-1", Bucket: "bucketname", }}, {"s3:eu-central-1/foobar", Config{ - Region: "eu-central-1", + URL: "eu-central-1", Bucket: "foobar", }}, {"s3:https://hostname:9999/foobar", Config{ diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 62616a993..6ee309112 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -24,7 +24,7 @@ func s3path(t backend.Type, name string) string { } type S3Backend struct { - s3api minio.API + client minio.CloudStorageClient connChan chan struct{} bucketname string } @@ -32,30 +32,18 @@ type S3Backend struct { // Open opens the S3 backend at bucket and region. The bucket is created if it // does not exist yet. 
func Open(cfg Config) (backend.Backend, error) { - mcfg := minio.Config{ - AccessKeyID: cfg.KeyID, - SecretAccessKey: cfg.Secret, - } + debug.Log("s3.Open", "open, config %#v", cfg) - if cfg.URL != "" { - mcfg.Endpoint = cfg.URL - } else { - mcfg.Region = cfg.Region - } - - if mcfg.Region == "" { - mcfg.Region = "us-east-1" - } - - s3api, err := minio.New(mcfg) + client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP) if err != nil { return nil, err } - be := &S3Backend{s3api: s3api, bucketname: cfg.Bucket} + be := &S3Backend{client: client, bucketname: cfg.Bucket} be.createConnections() - err = s3api.MakeBucket(cfg.Bucket, "") + // create new bucket with default ACL in default region + err = client.MakeBucket(cfg.Bucket, "", "") if err != nil { e, ok := err.(minio.ErrorResponse) @@ -123,15 +111,18 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { path := s3path(t, name) // Check key does not already exist - _, err := bb.b.s3api.StatObject(bb.b.bucketname, path) + _, err := bb.b.client.StatObject(bb.b.bucketname, path) if err == nil { return errors.New("key already exists") } <-bb.b.connChan - err = bb.b.s3api.PutObject(bb.b.bucketname, path, "binary/octet-stream", int64(bb.buf.Len()), bb.buf) + _, err = bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream") bb.b.connChan <- struct{}{} bb.buf.Reset() + + debug.Log("s3.Finalize", "finalized %v -> err %v", path, err) + return err } @@ -150,23 +141,50 @@ func (be *S3Backend) Create() (backend.Blob, error) { // name. The reader should be closed after draining it. func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { path := s3path(t, name) - rc, _, err := be.s3api.GetObject(be.bucketname, path) + rc, _, err := be.client.GetObject(be.bucketname, path) + debug.Log("s3.Get", "%v %v -> err %v", t, name, err) return rc, err } // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { + debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length) path := s3path(t, name) - rc, _, err := be.s3api.GetPartialObject(be.bucketname, path, int64(offset), int64(length)) - return rc, err + rd, stat, err := be.client.GetObjectPartial(be.bucketname, path) + debug.Log("s3.GetReader", " stat %v, err %v", stat, err) + if err != nil { + return nil, err + } + + l, o := int64(length), int64(offset) + + if l == 0 { + l = stat.Size - o + } + + if l > stat.Size-o { + l = stat.Size - o + } + + debug.Log("s3.GetReader", "%v %v, o %v l %v", t, name, o, l) + + buf := make([]byte, l) + n, err := rd.ReadAt(buf, o) + debug.Log("s3.GetReader", " -> n %v err %v", n, err) + if err == io.EOF && int64(n) == l { + debug.Log("s3.GetReader", " ignoring EOF error") + err = nil + } + + return backend.ReadCloser(bytes.NewReader(buf[:n])), err } // Test returns true if a blob of the given type and name exists in the backend. func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { found := false path := s3path(t, name) - _, err := be.s3api.StatObject(be.bucketname, path) + _, err := be.client.StatObject(be.bucketname, path) if err == nil { found = true } @@ -178,7 +196,9 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { // Remove removes the blob with the given name and type. 
func (be *S3Backend) Remove(t backend.Type, name string) error { path := s3path(t, name) - return be.s3api.RemoveObject(be.bucketname, path) + err := be.client.RemoveObject(be.bucketname, path) + debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) + return err } // List returns a channel that yields all names of blobs of type t. A @@ -189,12 +209,12 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { prefix := s3path(t, "") - listresp := be.s3api.ListObjects(be.bucketname, prefix, true) + listresp := be.client.ListObjects(be.bucketname, prefix, true, done) go func() { defer close(ch) for obj := range listresp { - m := strings.TrimPrefix(obj.Stat.Key, prefix) + m := strings.TrimPrefix(obj.Key, prefix) if m == "" { continue } diff --git a/backend/s3_test.go b/backend/s3_test.go index b177ad067..6d79f9cd8 100644 --- a/backend/s3_test.go +++ b/backend/s3_test.go @@ -1,6 +1,7 @@ package backend_test import ( + "net/url" "os" "testing" @@ -17,12 +18,21 @@ func TestS3Backend(t *testing.T) { t.Skip("s3 test server not available") } - be, err := s3.Open(s3.Config{ - URL: TestS3Server, - Bucket: "restictestbucket", - KeyID: os.Getenv("AWS_ACCESS_KEY_ID"), - Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), - }) + url, err := url.Parse(TestS3Server) + OK(t, err) + + cfg := s3.Config{ + Endpoint: url.Host, + Bucket: "restictestbucket", + KeyID: os.Getenv("AWS_ACCESS_KEY_ID"), + Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), + } + + if url.Scheme == "http" { + cfg.UseHTTP = true + } + + be, err := s3.Open(cfg) OK(t, err) testBackend(be, t) diff --git a/location/location_test.go b/location/location_test.go index 405ba0144..702b2651e 100644 --- a/location/location_test.go +++ b/location/location_test.go @@ -56,6 +56,12 @@ var parseTests = []struct { Bucket: "bucketname", }}, }, + {"s3:eu-central-1/repo", Location{Scheme: "s3", + Config: s3.Config{ + Region: "eu-central-1", + Bucket: "repo", + }}, + }, {"s3:https://hostname.foo/repo", Location{Scheme: "s3", Config: s3.Config{ URL: "https://hostname.foo", From d79c85af62fd9156b56c247d14342792e625ec3b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Tue, 29 Dec 2015 11:24:08 +0100 Subject: [PATCH 33/55] Fix s3 tests --- backend/s3/config_test.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/backend/s3/config_test.go b/backend/s3/config_test.go index 6b3962c94..54fc4718a 100644 --- a/backend/s3/config_test.go +++ b/backend/s3/config_test.go @@ -7,20 +7,21 @@ var configTests = []struct { cfg Config }{ {"s3://eu-central-1/bucketname", Config{ - URL: "eu-central-1", - Bucket: "bucketname", + Endpoint: "eu-central-1", + Bucket: "bucketname", }}, {"s3:eu-central-1/foobar", Config{ - URL: "eu-central-1", - Bucket: "foobar", + Endpoint: "eu-central-1", + Bucket: "foobar", }}, {"s3:https://hostname:9999/foobar", Config{ - URL: "https://hostname:9999", - Bucket: "foobar", + Endpoint: "hostname:9999", + Bucket: "foobar", }}, {"s3:http://hostname:9999/foobar", Config{ - URL: "http://hostname:9999", - Bucket: "foobar", + Endpoint: "hostname:9999", + Bucket: "foobar", + UseHTTP: true, }}, } From a73c4bd5a73b74d7fd004723430f5d20a14797e8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 30 Dec 2015 12:19:19 +0100 Subject: [PATCH 34/55] update s3 library for bugfix --- Godeps/Godeps.json | 4 +- .../src/github.com/minio/minio-go/api-get.go | 44 +-- .../src/github.com/minio/minio-go/api-list.go | 8 +- .../minio/minio-go/api-put-bucket.go | 4 +- .../minio/minio-go/api-put-object.go | 8 +- 
 .../github.com/minio/minio-go/api-remove.go   |   7 +-
 .../src/github.com/minio/minio-go/api-stat.go |   4 +-
 .../src/github.com/minio/minio-go/api.go      |  92 ++++++
 .../minio/minio-go/api_functional_test.go     | 270 ++++++++++++++++--
 .../github.com/minio/minio-go/appveyor.yml    |   2 -
 .../github.com/minio/minio-go/bucket-cache.go |   2 +-
 11 files changed, 386 insertions(+), 59 deletions(-)

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 3cb12b7a8..dfcfdce88 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -24,8 +24,8 @@
 	},
 	{
 		"ImportPath": "github.com/minio/minio-go",
-		"Comment": "v0.2.5-177-g691a38d",
-		"Rev": "691a38d161d6dfc0e8e78dc5360bc39f48a8626d"
+		"Comment": "v0.2.5-185-g654a97a",
+		"Rev": "654a97a4d165dabf422bec2ef6673bcd9d3daf00"
 	},
 	{
 		"ImportPath": "github.com/pkg/sftp",
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
index b331fb44c..d52beb453 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
@@ -17,6 +17,7 @@
 package minio
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -55,7 +56,7 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 	}
 
 	// Initiate the request.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return "", err
@@ -185,7 +186,7 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O
 			// Get shortest length.
 			// NOTE: Last remaining bytes are usually smaller than
 			// req.Buffer size. Use that as the final length.
-			length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset))
+			length := math.Min(float64(req.Buffer.Len()), float64(objectStat.Size-req.Offset))
 			httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
 			if err != nil {
 				resCh <- readAtResponse{
@@ -193,9 +194,9 @@
 				}
 				return
 			}
-			size, err := httpReader.Read(req.Buffer)
+			size, err := io.CopyN(req.Buffer, httpReader, int64(length))
 			resCh <- readAtResponse{
-				Size:  size,
+				Size:  int(size),
 				Error: err,
 			}
 		}
@@ -213,8 +214,8 @@ type readAtResponse struct {
 
 // request message container to communicate with internal go-routine.
 type readAtRequest struct {
-	Buffer []byte // requested bytes.
-	Offset int64  // readAt offset.
+	Buffer *bytes.Buffer
+	Offset int64 // readAt offset.
 }
 
 // objectReadAtCloser container for io.ReadAtCloser.
@@ -247,11 +248,16 @@ func newObjectReadAtCloser(reqCh chan<- readAtRequest, resCh <-chan readAtRespon
 // It returns the number of bytes read and the error, if any.
 // ReadAt always returns a non-nil error when n < len(b).
 // At end of file, that error is io.EOF.
-func (r *objectReadAtCloser) ReadAt(p []byte, offset int64) (int, error) {
+func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) {
 	// Locking.
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
 
+	// if offset is negative, or greater than or equal to the object size, we return EOF.
+	if offset < 0 || offset >= r.objectSize {
+		return 0, io.EOF
+	}
+
 	// prevErr is the error which was saved in a previous operation.
 	if r.prevErr != nil {
 		return 0, r.prevErr
@@ -261,7 +267,7 @@
 	reqMsg := readAtRequest{}
 
 	// Send the current offset and bytes requested.
-	reqMsg.Buffer = p
+	reqMsg.Buffer = bytes.NewBuffer(b)
 	reqMsg.Offset = offset
 
 	// Send read request over the control channel.
@@ -270,15 +276,21 @@
 	// Get data over the response channel.
 	dataMsg := <-r.resCh
 
+	// Bytes read.
+	bytesRead := int64(dataMsg.Size)
+
+	if dataMsg.Error == nil {
+		// If offset+bytes read is equal to objectSize
+		// we have reached end of file, we return io.EOF.
+		if offset+bytesRead == r.objectSize {
+			return dataMsg.Size, io.EOF
+		}
+		return dataMsg.Size, nil
+	}
+
 	// Save any error.
 	r.prevErr = dataMsg.Error
-	if dataMsg.Error != nil {
-		if dataMsg.Error == io.EOF {
-			return dataMsg.Size, dataMsg.Error
-		}
-		return 0, dataMsg.Error
-	}
-	return dataMsg.Size, nil
+	return dataMsg.Size, dataMsg.Error
 }
 
 // Closer is the interface that wraps the basic Close method.
@@ -340,7 +352,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 		return nil, ObjectStat{}, err
 	}
 	// Execute the request.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	if err != nil {
 		return nil, ObjectStat{}, err
 	}
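The explicit checks above exist because io.ReaderAt has a stricter contract than io.Reader: when ReadAt returns n < len(b) it must return a non-nil error, and at the end of the input that error is io.EOF, delivered together with the bytes that were read. The standard library's bytes.Reader shows the same behavior, which is handy when testing expectations against this wrapper (illustration only):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        r := bytes.NewReader([]byte("0123456789"))
        p := make([]byte, 4)
        // Only 2 of the 4 requested bytes exist past offset 8, so ReadAt
        // returns the short count together with io.EOF.
        n, err := r.ReadAt(p, 8)
        fmt.Println(n, err, string(p[:n])) // 2 EOF 89
    }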
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
index 180a28a9a..4de5da89d 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
@@ -39,7 +39,7 @@ func (c Client) ListBuckets() ([]BucketStat, error) {
 		return nil, err
 	}
 	// Initiate the request.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return nil, err
@@ -197,7 +197,7 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 		return listBucketResult{}, err
 	}
 	// Execute list buckets.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return listBucketResult{}, err
@@ -361,7 +361,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 		return listMultipartUploadsResult{}, err
 	}
 	// Execute list multipart uploads request.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return listMultipartUploadsResult{}, err
@@ -466,7 +466,7 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
 		return listObjectPartsResult{}, err
 	}
 	// Execute list object parts.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return listObjectPartsResult{}, err
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go
index 97f54f782..6293e64f3 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go
@@ -67,7 +67,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
 	}
 
 	// Execute the request.
-	resp, err := c.httpClient.Do(req)
+	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return err
@@ -201,7 +201,7 @@ func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
 	}
 
 	// Initiate the request.
- resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go index a02df778a..300ed4b40 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go @@ -379,7 +379,7 @@ func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjec return ObjectStat{}, err } // Execute the request. - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return ObjectStat{}, err @@ -432,7 +432,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri return initiateMultipartUploadResult{}, err } // Execute the request. - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err @@ -484,7 +484,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPar return objectPart{}, err } // Execute the request. - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return objectPart{}, err @@ -539,7 +539,7 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, } // Execute the request. - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return completeMultipartUploadResult{}, err diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go index 0330c9538..0e1abc2e3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go @@ -35,7 +35,7 @@ func (c Client) RemoveBucket(bucketName string) error { if err != nil { return err } - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err @@ -67,7 +67,7 @@ func (c Client) RemoveObject(bucketName, objectName string) error { if err != nil { return err } - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err @@ -137,8 +137,9 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er if err != nil { return err } + // execute the request. 
- resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go index 29bd83fd9..9c5e96cf3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go @@ -34,7 +34,7 @@ func (c Client) BucketExists(bucketName string) error { if err != nil { return err } - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err @@ -63,7 +63,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) { if err != nil { return ObjectStat{}, err } - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return ObjectStat{}, err diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go index 788a74d4d..f74bf2036 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go @@ -19,10 +19,14 @@ package minio import ( "encoding/base64" "encoding/hex" + "fmt" "io" "net/http" + "net/http/httputil" "net/url" + "os" "runtime" + "strings" "time" ) @@ -44,6 +48,10 @@ type Client struct { // Needs allocation. httpClient *http.Client bucketLocCache *bucketLocationCache + + // Advanced functionality + isTraceEnabled bool + traceOutput io.Writer } // Global constants. @@ -159,6 +167,26 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { } } +// TraceOn - enable HTTP tracing. +func (c *Client) TraceOn(outputStream io.Writer) error { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true + return nil +} + +// TraceOff - disable HTTP tracing. +func (c *Client) TraceOff() { + // Disable tracing. + c.isTraceEnabled = false +} + // requestMetadata - is container for all the values to make a request. type requestMetadata struct { // If set newRequest presigns the URL. @@ -178,6 +206,66 @@ type requestMetadata struct { contentMD5Bytes []byte } +// dumpHTTP - dump HTTP request and response. +func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { + // Starts http dump. + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + // Only display request header. + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + // Write request to trace output. + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + // Only display response header. + respTrace, err := httputil.DumpResponse(resp, false) + if err != nil { + return err + } + + // Write response to trace output. + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. +func (c Client) do(req *http.Request) (*http.Response, error) { + // execute the request. 
+ resp, err := c.httpClient.Do(req) + if err != nil { + return resp, err + } + // If trace is enabled, dump http request and response. + if c.isTraceEnabled { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + return resp, nil +} + +// newRequest - instantiate a new HTTP request for a given method. func (c Client) newRequest(method string, metadata requestMetadata) (*http.Request, error) { // If no method is supplied default to 'POST'. if method == "" { @@ -344,4 +432,8 @@ type CloudStorageClient interface { // Set custom transport. SetCustomTransport(customTransport http.RoundTripper) + + // HTTP tracing methods. + TraceOn(traceOutput io.Writer) error + TraceOff() } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go index 9e1d60cf9..5705d729b 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go @@ -1,9 +1,27 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package minio_test import ( "bytes" + "io" "io/ioutil" "math/rand" + "net/http" "os" "testing" "time" @@ -35,7 +53,11 @@ func randString(n int, src rand.Source) string { return string(b[0:30]) } -func TestFunctional(t *testing.T) { +func TestGetObjectPartialFunctional(t *testing.T) { + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. c, err := minio.New( "play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", @@ -47,55 +69,204 @@ func TestFunctional(t *testing.T) { } // Set user agent. - c.SetAppInfo("Test", "0.1.0") + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + // Enable tracing, write to stdout. + // c.TraceOn(nil) + + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. 
 	err = c.MakeBucket(bucketName, "private", "us-east-1")
 	if err != nil {
 		t.Fatal("Error:", err, bucketName)
 	}
+
+	// generate data
+	buf := make([]byte, rand.Intn(1<<20))
+
+	// save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "binary/octet-stream")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if n != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+	}
+
+	// read the data back
+	r, st, err := c.GetObjectPartial(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if st.Size != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+			len(buf), st.Size)
+	}
+
+	offset := int64(2048)
+
+	// read directly
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	m, err := r.ReadAt(buf2, offset)
+	if err != nil {
+		t.Fatal("Error:", err, st.Size, len(buf2), offset)
+	}
+	if m != len(buf2) {
+		t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
+	}
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		t.Fatal("Error:", err, st.Size, len(buf3), offset)
+	}
+	if m != len(buf3) {
+		t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
+	}
+	if !bytes.Equal(buf2, buf3) {
+		t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
+	}
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		t.Fatal("Error:", err, st.Size, len(buf4), offset)
+	}
+	if m != len(buf4) {
+		t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
+	}
+	if !bytes.Equal(buf2, buf4) {
+		t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
+	}
+
+	buf5 := make([]byte, n)
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			t.Fatal("Error:", err, len(buf5))
+		}
+	}
+	if m != len(buf5) {
+		t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
+	}
+	if !bytes.Equal(buf, buf5) {
+		t.Fatal("Error: data read in GetObject does not match what was previously uploaded.")
+	}
+
+	buf6 := make([]byte, n+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			t.Fatal("Error:", err, len(buf6))
+		}
+	}
+}
+
+func TestFunctional(t *testing.T) {
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(
+		"play.minio.io:9002",
+		"Q3AM3UQ867SPQQA43P2F",
+		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+		false,
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(nil)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+	// make a new bucket.
+	err = c.MakeBucket(bucketName, "private", "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// generate a random file name.
 	fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
 	file, err := os.Create(fileName)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
-	for i := 0; i < 10; i++ {
-		file.WriteString(fileName)
+	var totalSize int64
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		n, err := file.Write(buf)
+		if err != nil {
+			t.Fatal("Error:", err)
+		}
+		totalSize += int64(n)
 	}
 	file.Close()
 
+	// verify the bucket exists and you have access.
 	err = c.BucketExists(bucketName)
 	if err != nil {
 		t.Fatal("Error:", err, bucketName)
 	}
 
+	// make the bucket 'public read/write'.
 	err = c.SetBucketACL(bucketName, "public-read-write")
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
 
+	// get the previously set acl.
 	acl, err := c.GetBucketACL(bucketName)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
+
+	// acl must be 'public read/write'.
 	if acl != minio.BucketACL("public-read-write") {
 		t.Fatal("Error:", acl)
 	}
 
-	_, err = c.ListBuckets()
+	// list all buckets.
+	buckets, err := c.ListBuckets()
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
 
-	objectName := bucketName + "Minio"
-	reader := bytes.NewReader([]byte("Hello World!"))
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If the bucket was not found, error out.
+	if !bucketFound {
+		t.Fatal("Error: bucket ", bucketName, "not found")
+	}
+
+	objectName := bucketName + "unique"
+
+	// generate data
+	buf := make([]byte, rand.Intn(1<<19))
+	reader := bytes.NewReader(buf)
 
 	n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "")
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
-	if n != int64(len([]byte("Hello World!"))) {
+	if n != int64(len(buf)) {
 		t.Fatal("Error: bad length ", n, reader.Len())
 	}
 
@@ -104,26 +275,75 @@ func TestFunctional(t *testing.T) {
 		t.Fatal("Error: ", err)
 	}
 
-	n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain")
-	if err != nil {
-		t.Fatal("Error: ", err)
-	}
-	if n != int64(10*len(fileName)) {
-		t.Fatal("Error: bad length ", n, int64(10*len(fileName)))
-	}
-
-	err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f")
-	if err != nil {
-		t.Fatal("Error: ", err)
-	}
-
 	newReadBytes, err := ioutil.ReadAll(newReader)
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
 
-	if !bytes.Equal(newReadBytes, []byte("Hello World!")) {
-		t.Fatal("Error: bytes invalid.")
+	if !bytes.Equal(newReadBytes, buf) {
+		t.Fatal("Error: bytes mismatch.")
+	}
+
+	n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain")
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	if n != totalSize {
+		t.Fatal("Error: bad length ", n, totalSize)
+	}
+
+	err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f")
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+
+	presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+
+	resp, err := http.Get(presignedGetURL)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		t.Fatal("Error: ", resp.Status)
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		t.Fatal("Error: bytes mismatch.")
+	}
+
+	presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	buf = make([]byte, rand.Intn(1<<20))
+	req, err := http.NewRequest("PUT", presignedPutURL,
bytes.NewReader(buf)) + if err != nil { + t.Fatal("Error: ", err) + } + httpClient := &http.Client{} + resp, err = httpClient.Do(req) + if err != nil { + t.Fatal("Error: ", err) + } + + newReader, _, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") } err = c.RemoveObject(bucketName, objectName) @@ -134,6 +354,10 @@ func TestFunctional(t *testing.T) { if err != nil { t.Fatal("Error: ", err) } + err = c.RemoveObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } err = c.RemoveBucket(bucketName) if err != nil { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml index 7f624a459..963698a04 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml @@ -18,7 +18,6 @@ install: - go env - go get -u github.com/golang/lint/golint - go get -u golang.org/x/tools/cmd/vet - - go get -u github.com/fzipp/gocyclo - go get -u github.com/remyoudompheng/go-misc/deadcode # to run your custom scripts instead of automatic MSBuild @@ -26,7 +25,6 @@ build_script: - go vet ./... - gofmt -s -l . - golint github.com/minio/minio-go... - - gocyclo -over 30 . - deadcode - go test - go test -race diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go index 29fb6aa36..d0993ba4a 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go @@ -82,7 +82,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { } // Initiate the request. - resp, err := c.httpClient.Do(req) + resp, err := c.do(req) defer closeResponse(resp) if err != nil { return "", err From f3193541749281b1d067839d2d812bc056789d05 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:09:06 +0100 Subject: [PATCH 35/55] Update s3 library again --- Godeps/Godeps.json | 4 ++-- .../src/github.com/minio/minio-go/api-get.go | 14 +++++++++----- .../minio/minio-go/api_functional_test.go | 19 +++++++++++++++---- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index dfcfdce88..c6e5457c1 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-185-g654a97a", - "Rev": "654a97a4d165dabf422bec2ef6673bcd9d3daf00" + "Comment": "v0.2.5-187-gad1597d", + "Rev": "ad1597d864f56f608f8a1694ae9b5970fef57eb6" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go index d52beb453..e35dcf930 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go @@ -17,7 +17,6 @@ package minio import ( - "bytes" "errors" "fmt" "io" @@ -186,7 +185,7 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O // Get shortest length. // NOTE: Last remaining bytes are usually smaller than // req.Buffer size. Use that as the final length. 
- length := math.Min(float64(req.Buffer.Len()), float64(objectStat.Size-req.Offset)) + length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset)) httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length)) if err != nil { resCh <- readAtResponse{ @@ -194,7 +193,12 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O } return } - size, err := io.CopyN(req.Buffer, httpReader, int64(length)) + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not all the bytes + // ReadFull returns ErrUnexpectedEOF + err = io.EOF + } resCh <- readAtResponse{ Size: int(size), Error: err, @@ -214,7 +218,7 @@ type readAtResponse struct { // request message container to communicate with internal go-routine. type readAtRequest struct { - Buffer *bytes.Buffer + Buffer []byte Offset int64 // readAt offset. } @@ -267,7 +271,7 @@ func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) { reqMsg := readAtRequest{} // Send the current offset and bytes requested. - reqMsg.Buffer = bytes.NewBuffer(b) + reqMsg.Buffer = b reqMsg.Offset = offset // Send read request over the control channel. diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go index 5705d729b..886959de3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go @@ -18,6 +18,7 @@ package minio_test import ( "bytes" + crand "crypto/rand" "io" "io/ioutil" "math/rand" @@ -83,8 +84,13 @@ func TestGetObjectPartialFunctional(t *testing.T) { t.Fatal("Error:", err, bucketName) } - // generate data - buf := make([]byte, rand.Intn(1<<20)) + // generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } // save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano())) @@ -122,6 +128,10 @@ func TestGetObjectPartialFunctional(t *testing.T) { if m != len(buf2) { t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2)) } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 m, err = r.ReadAt(buf3, offset) if err != nil { t.Fatal("Error:", err, st.Size, len(buf3), offset) @@ -129,9 +139,10 @@ func TestGetObjectPartialFunctional(t *testing.T) { if m != len(buf3) { t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3)) } - if !bytes.Equal(buf2, buf3) { + if !bytes.Equal(buf3, buf[offset:offset+512]) { t.Fatal("Error: Incorrect read between two ReadAt from same offset.") } + offset += 512 m, err = r.ReadAt(buf4, offset) if err != nil { t.Fatal("Error:", err, st.Size, len(buf4), offset) @@ -139,7 +150,7 @@ func TestGetObjectPartialFunctional(t *testing.T) { if m != len(buf4) { t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4)) } - if !bytes.Equal(buf2, buf4) { + if !bytes.Equal(buf4, buf[offset:offset+512]) { t.Fatal("Error: Incorrect read between two ReadAt from same offset.") } From e0361b1f9ff1f733aac79b7a222a99c003b7ffa7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:09:33 +0100 Subject: [PATCH 36/55] Add ContinuousReader --- 
backend/s3/cont_reader.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 backend/s3/cont_reader.go diff --git a/backend/s3/cont_reader.go b/backend/s3/cont_reader.go new file mode 100644 index 000000000..da0ef4000 --- /dev/null +++ b/backend/s3/cont_reader.go @@ -0,0 +1,23 @@ +package s3 + +import ( + "fmt" + "io" +) + +// ContinuousReader implements an io.Reader on top of an io.ReaderAt, advancing +// an offset. +type ContinuousReader struct { + R io.ReaderAt + Offset int64 +} + +func (c *ContinuousReader) Read(p []byte) (int, error) { + fmt.Printf("ContinuousReader %p: ReadAt(offset %v)\n", c, c.Offset) + n, err := c.R.ReadAt(p, c.Offset) + fmt.Printf("ContinuousReader %p: len(p) = %v, n %v, err %v\n", + c, len(p), n, err) + fmt.Printf(" %02x\n", p[:n]) + c.Offset += int64(n) + return n, err +} From 5071f28d55978648ec138f58b3f393f5c0789cd8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:10:38 +0100 Subject: [PATCH 37/55] ReadCloser: Call close if reader implements it --- backend/mem_backend.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/backend/mem_backend.go b/backend/mem_backend.go index 21d78846b..e757566e6 100644 --- a/backend/mem_backend.go +++ b/backend/mem_backend.go @@ -125,17 +125,21 @@ func memCreate(be *MemoryBackend) (Blob, error) { return blob, nil } -// ReadCloser wraps a reader and adds a noop Close method. +// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. func ReadCloser(rd io.Reader) io.ReadCloser { return readCloser{rd} } -// readCloser wraps a reader and adds a noop Close method. +// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. type readCloser struct { io.Reader } func (rd readCloser) Close() error { + if r, ok := rd.Reader.(io.Closer); ok { + return r.Close() + } + return nil } From a850041cf0db241819e8e0993f64d9e27210d339 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:12:51 +0100 Subject: [PATCH 38/55] ContReader: Remove debug output --- backend/s3/cont_reader.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/backend/s3/cont_reader.go b/backend/s3/cont_reader.go index da0ef4000..77bd1dca1 100644 --- a/backend/s3/cont_reader.go +++ b/backend/s3/cont_reader.go @@ -1,9 +1,6 @@ package s3 -import ( - "fmt" - "io" -) +import "io" // ContinuousReader implements an io.Reader on top of an io.ReaderAt, advancing // an offset. 
@@ -13,11 +10,7 @@ type ContinuousReader struct { } func (c *ContinuousReader) Read(p []byte) (int, error) { - fmt.Printf("ContinuousReader %p: ReadAt(offset %v)\n", c, c.Offset) n, err := c.R.ReadAt(p, c.Offset) - fmt.Printf("ContinuousReader %p: len(p) = %v, n %v, err %v\n", - c, len(p), n, err) - fmt.Printf(" %02x\n", p[:n]) c.Offset += int64(n) return n, err } From 0237b0d9724eeeae6a86db189400f972cee4cd31 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:37:08 +0100 Subject: [PATCH 39/55] Update s3 library again --- Godeps/Godeps.json | 4 +- .../src/github.com/minio/minio-go/.travis.yml | 2 +- .../minio/minio-go/api-definitions.go | 10 +- .../minio/minio-go/api-error-response.go | 10 + .../minio/minio-go/api-fput-object.go | 65 +++--- .../src/github.com/minio/minio-go/api-get.go | 10 +- .../src/github.com/minio/minio-go/api-list.go | 4 + .../minio/minio-go/api-put-object-partial.go | 199 +++++++++++++++++- .../minio/minio-go/api-put-object.go | 146 +++++++------ .../minio/minio-go/api-s3-definitions.go | 3 - .../src/github.com/minio/minio-go/api-stat.go | 7 +- .../minio/minio-go/api_functional_test.go | 174 ++++++++++++++- .../github.com/minio/minio-go/appveyor.yml | 2 +- .../src/github.com/minio/minio-go/tempfile.go | 16 -- 14 files changed, 505 insertions(+), 147 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index c6e5457c1..97d89996a 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-187-gad1597d", - "Rev": "ad1597d864f56f608f8a1694ae9b5970fef57eb6" + "Comment": "v0.2.5-195-gf30b6ca", + "Rev": "f30b6ca90bfda7578f6a11b7ba6af2eae7b0510c" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml index 01078a5e7..c55421487 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml @@ -3,7 +3,7 @@ go: - 1.5.1 script: - go vet ./... -- go test -race -v ./... +- go test -test.short -race -v ./... notifications: slack: secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8= diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go index 7667645a1..123de1850 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go @@ -71,20 +71,20 @@ type ObjectMultipartStat struct { Err error } -// partMetadata - container for each partMetadata. -type partMetadata struct { +// partData - container for each part. +type partData struct { MD5Sum []byte Sha256Sum []byte ReadCloser io.ReadCloser Size int64 - Number int // partMetadata number. + Number int // partData number. // Error Err error } -// putObjectMetadata - container for each single PUT operation. -type putObjectMetadata struct { +// putObjectData - container for each single PUT operation. 
+type putObjectData struct { MD5Sum []byte Sha256Sum []byte ReadCloser io.ReadCloser diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go index 0d2496507..4d7e30fc1 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go @@ -218,6 +218,16 @@ func ErrInvalidObjectName(message string) error { } } +// ErrInvalidParts - invalid number of parts. +func ErrInvalidParts(expectedParts, uploadedParts int) error { + msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts) + return ErrorResponse{ + Code: "InvalidParts", + Message: msg, + RequestID: "minio", + } +} + // ErrInvalidObjectPrefix - invalid object prefix response is // similar to object name response. var ErrInvalidObjectPrefix = ErrInvalidObjectName diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go index 059710038..00b10aabb 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go @@ -93,7 +93,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. if isGoogleEndpoint(c.endpointURL) { - if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) { + if fileSize > int64(maxSinglePutObjectSize) { return 0, ErrorResponse{ Code: "NotImplemented", Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), @@ -108,7 +108,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // NOTE: S3 doesn't allow anonymous multipart requests. if isAmazonEndpoint(c.endpointURL) && c.anonymous { - if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) { + if fileSize > int64(maxSinglePutObjectSize) { return 0, ErrorResponse{ Code: "NotImplemented", Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), @@ -121,14 +121,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) return n, err } - // Large file upload is initiated for uploads for input data size - // if its greater than 5MiB or data size is negative. - if fileSize >= minimumPartSize || fileSize < 0 { - n, err := c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType) - return n, err + // Small object upload is initiated for uploads for input data size smaller than 5MiB. + if fileSize < minimumPartSize { + return c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType) } - n, err := c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType) - return n, err + return c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType) } // computeHash - calculates MD5 and Sha256 for an input read Seeker. @@ -192,7 +189,6 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File var prevMaxPartSize int64 // Loop through all parts and calculate totalUploadedSize. for _, partInfo := range partsInfo { - totalUploadedSize += partInfo.Size // Choose the maximum part size. 
if partInfo.Size >= prevMaxPartSize { prevMaxPartSize = partInfo.Size @@ -206,11 +202,14 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File partSize = prevMaxPartSize } - // Part number always starts with '1'. - partNumber := 1 + // Part number always starts with '0'. + partNumber := 0 // Loop through until EOF. for totalUploadedSize < fileSize { + // Increment part number. + partNumber++ + // Get a section reader on a particular offset. sectionReader := io.NewSectionReader(fileData, totalUploadedSize, partSize) @@ -221,7 +220,7 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File } // Save all the part metadata. - partMdata := partMetadata{ + prtData := partData{ ReadCloser: ioutil.NopCloser(sectionReader), Size: size, MD5Sum: md5Sum, @@ -229,31 +228,26 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File Number: partNumber, // Part number to be uploaded. } - // If part number already uploaded, move to the next one. - if isPartUploaded(objectPart{ - ETag: hex.EncodeToString(partMdata.MD5Sum), - PartNumber: partMdata.Number, + // If part not uploaded proceed to upload. + if !isPartUploaded(objectPart{ + ETag: hex.EncodeToString(prtData.MD5Sum), + PartNumber: prtData.Number, }, partsInfo) { - // Close the read closer. - partMdata.ReadCloser.Close() - continue + // Upload the part. + objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) + if err != nil { + prtData.ReadCloser.Close() + return totalUploadedSize, err + } + // Save successfully uploaded part metadata. + partsInfo[prtData.Number] = objPart } - // Upload the part. - objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata) - if err != nil { - partMdata.ReadCloser.Close() - return totalUploadedSize, err - } + // Close the read closer for temporary file. + prtData.ReadCloser.Close() // Save successfully uploaded size. - totalUploadedSize += partMdata.Size - - // Save successfully uploaded part metadata. - partsInfo[partMdata.Number] = objPart - - // Increment to next part number. - partNumber++ + totalUploadedSize += prtData.Size } // if totalUploadedSize is different than the file 'size'. Do not complete the request throw an error. @@ -269,6 +263,11 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) } + // If partNumber is different than total list of parts, error out. + if partNumber != len(completeMultipartUpload.Parts) { + return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) + } + // Sort all completed parts. sort.Sort(completedParts(completeMultipartUpload.Parts)) _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go index e35dcf930..7596278af 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go @@ -365,9 +365,12 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) ( return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) } } - // trim off the odd double quotes. - md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") - // parse the date. + + // Trim off the odd double quotes from ETag in the beginning and end. 
+	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	md5sum = strings.TrimSuffix(md5sum, "\"")
+
+	// Parse the date.
 	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 	if err != nil {
 		msg := "Last-Modified time format not recognized. " + reportIssue
@@ -379,6 +382,7 @@
 			AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 		}
 	}
+	// Get content-type.
 	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
 	if contentType == "" {
 		contentType = "application/octet-stream"
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
index 4de5da89d..8838900a8 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"net/http"
 	"net/url"
+	"strings"
 )
 
 // ListBuckets list all buckets owned by this authenticated user.
@@ -393,6 +394,9 @@
 	}
 	// Append to parts info.
 	for _, part := range listObjPartsResult.ObjectParts {
+		// Trim off the odd double quotes from ETag in the beginning and end.
+		part.ETag = strings.TrimPrefix(part.ETag, "\"")
+		part.ETag = strings.TrimSuffix(part.ETag, "\"")
 		partsInfo[part.PartNumber] = part
 	}
 	// Keep part number marker, for the next iteration.
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go
index 3b7a5b733..8c05d8858 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go
@@ -17,11 +17,14 @@
 package minio
 
 import (
+	"bytes"
 	"crypto/md5"
 	"crypto/sha256"
 	"errors"
+	"fmt"
 	"hash"
 	"io"
+	"io/ioutil"
 	"sort"
 )
 
@@ -34,9 +37,187 @@ func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtClose
 	if err := isValidObjectName(objectName); err != nil {
 		return 0, err
 	}
+	// A negative input size should return an error.
+	if size < 0 {
+		return 0, ErrInvalidArgument("Input file size cannot be negative.")
+	}
+	// Input size bigger than 5TiB should fail.
+	if size > int64(maxMultipartPutObjectSize) {
+		return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.")
+	}
 
-	// Cleanup any previously left stale files, as the function exits.
-	defer cleanupStaleTempfiles("multiparts$-putobject-partial")
+	// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
+	// So we fall back to single PUT operation with the maximum limit of 5GiB.
+	if isGoogleEndpoint(c.endpointURL) {
+		if size > int64(maxSinglePutObjectSize) {
+			return 0, ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", size),
+				Key:        objectName,
+				BucketName: bucketName,
+			}
+		}
+		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+		n, err := c.putPartialNoChksum(bucketName, objectName, data, size, contentType)
+		return n, err
+	}
+
+	// NOTE: S3 doesn't allow anonymous multipart requests.
+	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+		if size > int64(maxSinglePutObjectSize) {
+			return 0, ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
+				Key:        objectName,
+				BucketName: bucketName,
+			}
+		}
+		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+		n, err := c.putPartialAnonymous(bucketName, objectName, data, size, contentType)
+		return n, err
+	}
+
+	// Small file upload is initiated for input data smaller than 5MiB.
+	if size < minimumPartSize {
+		n, err = c.putPartialSmallObject(bucketName, objectName, data, size, contentType)
+		return n, err
+	}
+	n, err = c.putPartialLargeObject(bucketName, objectName, data, size, contentType)
+	return n, err
+}
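Stripped of the Google and anonymous special cases, PutObjectPartial routes an upload purely by its size. A standalone sketch of that classification, with the limits as stated in the validation code above (illustration only, not part of the patch):

    package main

    import "fmt"

    const (
        minimumPartSize           = 5 * 1024 * 1024               // 5MiB
        maxMultipartPutObjectSize = 5 * 1024 * 1024 * 1024 * 1024 // 5TiB
    )

    // uploadPath mirrors the size dispatch of PutObjectPartial.
    func uploadPath(size int64) string {
        switch {
        case size < 0:
            return "rejected: negative size"
        case size > maxMultipartPutObjectSize:
            return "rejected: bigger than 5TiB"
        case size < minimumPartSize:
            return "single PUT (putPartialSmallObject)"
        default:
            return "multipart (putPartialLargeObject)"
        }
    }

    func main() {
        fmt.Println(uploadPath(4 * 1024 * 1024))  // single PUT (putPartialSmallObject)
        fmt.Println(uploadPath(64 * 1024 * 1024)) // multipart (putPartialLargeObject)
    }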
+	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+		if size > int64(maxSinglePutObjectSize) {
+			return 0, ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
+				Key:        objectName,
+				BucketName: bucketName,
+			}
+		}
+		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+		n, err := c.putPartialAnonymous(bucketName, objectName, data, size, contentType)
+		return n, err
+	}
+
+	// Small object upload is used for input data smaller than 5MiB.
+	if size < minimumPartSize {
+		n, err = c.putPartialSmallObject(bucketName, objectName, data, size, contentType)
+		return n, err
+	}
+	n, err = c.putPartialLargeObject(bucketName, objectName, data, size, contentType)
+	return n, err
+
+}
+
+// putPartialNoChksum is a special function for Google Cloud Storage, needed
+// because Google's multipart API is not S3 compatible.
+func (c Client) putPartialNoChksum(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+	if size > maxPartSize {
+		return 0, ErrEntityTooLarge(size, bucketName, objectName)
+	}
+
+	// Create a new pipe to stage the reads.
+	reader, writer := io.Pipe()
+
+	// readAtOffset carries the offset for the next read.
+	var readAtOffset int64
+
+	// Reads are staged through a 5MiB buffer.
+	readAtBuffer := make([]byte, 1024*1024*5)
+
+	// Start a goroutine that feeds the pipe.
+	go func() {
+		for {
+			readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset)
+			if rerr != nil {
+				if rerr != io.EOF {
+					writer.CloseWithError(rerr)
+					return
+				}
+			}
+			writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
+			if werr != nil {
+				writer.CloseWithError(werr)
+				return
+			}
+			if readAtSize != writeSize {
+				writer.CloseWithError(errors.New("Something really bad happened here. " + reportIssue))
+				return
+			}
+			readAtOffset += int64(writeSize)
+			if rerr == io.EOF {
+				writer.Close()
+				return
+			}
+		}
+	}()
+	// No md5sum or sha256 is calculated for this upload path.
+	putObjData := putObjectData{
+		MD5Sum:      nil,
+		Sha256Sum:   nil,
+		ReadCloser:  reader,
+		Size:        size,
+		ContentType: contentType,
+	}
+	// Execute put object.
+	st, err := c.putObject(bucketName, objectName, putObjData)
+	if err != nil {
+		return 0, err
+	}
+	if st.Size != size {
+		return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+	}
+	return size, nil
+}
+
+// putPartialAnonymous is a special function for uploading content as an anonymous
+// request. It is necessary since Amazon S3 doesn't allow anonymous multipart uploads.
+func (c Client) putPartialAnonymous(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+	return c.putPartialNoChksum(bucketName, objectName, data, size, contentType)
+}
+
+// putPartialSmallObject uploads objects smaller than 5MiB.
+func (c Client) putPartialSmallObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Read the entire input into a buffer of exactly 'size' bytes.
+	readAtBuffer := make([]byte, size)
+	readAtSize, err := data.ReadAt(readAtBuffer, 0)
+	if err != nil {
+		if err != io.EOF {
+			return 0, err
+		}
+	}
+	if int64(readAtSize) != size {
+		return 0, ErrUnexpectedEOF(int64(readAtSize), size, bucketName, objectName)
+	}
+
+	// Construct a new PUT object metadata.
+	putObjData := putObjectData{
+		MD5Sum:      sumMD5(readAtBuffer),
+		Sha256Sum:   sum256(readAtBuffer),
+		ReadCloser:  ioutil.NopCloser(bytes.NewReader(readAtBuffer)),
+		Size:        size,
+		ContentType: contentType,
+	}
+	// Single part use case, use putObject directly.
+	st, err := c.putObject(bucketName, objectName, putObjData)
+	if err != nil {
+		return 0, err
+	}
+	if st.Size != size {
+		return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+	}
+	return size, nil
+}
+
+// putPartialLargeObject uploads objects bigger than 5MiB.
+func (c Client) putPartialLargeObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
 
 	// getUploadID for an object, initiates a new multipart request
 	// if it cannot find any previously partially uploaded object.
@@ -139,7 +320,7 @@ func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtClose
 	}
 
 	// Save all the part metadata.
-	partMdata := partMetadata{
+	prtData := partData{
 		ReadCloser: tmpFile,
 		MD5Sum:     hashMD5.Sum(nil),
 		Size:       totalReadPartSize,
@@ -147,25 +328,25 @@ func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtClose
 
 	// Signature version '4'.
 	if c.signature.isV4() {
-		partMdata.Sha256Sum = hashSha256.Sum(nil)
+		prtData.Sha256Sum = hashSha256.Sum(nil)
 	}
 
 	// Current part number to be uploaded.
-	partMdata.Number = partNumber
+	prtData.Number = partNumber
 
 	// execute upload part.
-	objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+	objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData)
 	if err != nil {
 		// Close the read closer.
-		partMdata.ReadCloser.Close()
+		prtData.ReadCloser.Close()
 		return totalUploadedSize, err
 	}
 
 	// Save successfully uploaded size.
-	totalUploadedSize += partMdata.Size
+	totalUploadedSize += prtData.Size
 
 	// Save successfully uploaded part metadata.
-	partsInfo[partMdata.Number] = objPart
+	partsInfo[prtData.Number] = objPart
 
 	// Move to next part.
 	partNumber++
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go
index 300ed4b40..563856bae 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go
@@ -50,8 +50,8 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
 // - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
 //   Maximum object size that can be uploaded through this operation will be 5TiB.
 //
-// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
-// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
+// So we fall back to single PUT operation with the maximum limit of 5GiB. // // NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. func (c Client) PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { @@ -63,8 +63,8 @@ func (c Client) PutObject(bucketName, objectName string, data io.Reader, size in return 0, err } - // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. - // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. + // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. + // So we fall back to single PUT operation with the maximum limit of 5GiB. if isGoogleEndpoint(c.endpointURL) { if size <= -1 { return 0, ErrorResponse{ @@ -114,7 +114,7 @@ func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, siz return 0, ErrEntityTooLarge(size, bucketName, objectName) } // For anonymous requests, we will not calculate sha256 and md5sum. - putObjMetadata := putObjectMetadata{ + putObjData := putObjectData{ MD5Sum: nil, Sha256Sum: nil, ReadCloser: ioutil.NopCloser(data), @@ -122,9 +122,13 @@ func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, siz ContentType: contentType, } // Execute put object. - if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil { + st, err := c.putObject(bucketName, objectName, putObjData) + if err != nil { return 0, err } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } return size, nil } @@ -160,7 +164,7 @@ func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, si return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName) } // Construct a new PUT object metadata. - putObjMetadata := putObjectMetadata{ + putObjData := putObjectData{ MD5Sum: sumMD5(dataBytes), Sha256Sum: sum256(dataBytes), ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)), @@ -168,9 +172,13 @@ func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, si ContentType: contentType, } // Single part use case, use putObject directly. - if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil { + st, err := c.putObject(bucketName, objectName, putObjData) + if err != nil { return 0, err } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } return size, nil } @@ -189,12 +197,13 @@ func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int // Copies to input at writer. size, err = io.CopyN(hashWriter, data, partSize) if err != nil { + // If not EOF return error right here. if err != io.EOF { return nil, nil, 0, err } } - // Seek back to beginning of input. + // Seek back to beginning of input, any error fail right here. if _, err := writer.Seek(0, 0); err != nil { return nil, nil, 0, err } @@ -204,7 +213,7 @@ func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int if c.signature.isV4() { sha256Sum = hashSha256.Sum(nil) } - return md5Sum, sha256Sum, size, nil + return md5Sum, sha256Sum, size, err } // putLargeObject uploads files bigger than 5 mega bytes. @@ -217,9 +226,6 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si return 0, err } - // Cleanup any previously left stale files, as the function exits. 
- defer cleanupStaleTempfiles("multiparts$-putobject") - // getUploadID for an object, initiates a new multipart request // if it cannot find any previously partially uploaded object. uploadID, err := c.getUploadID(bucketName, objectName, contentType) @@ -242,7 +248,6 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si var prevMaxPartSize int64 // Loop through all parts and calculate totalUploadedSize. for _, partInfo := range partsInfo { - totalUploadedSize += partInfo.Size // Choose the maximum part size. if partInfo.Size >= prevMaxPartSize { prevMaxPartSize = partInfo.Size @@ -256,15 +261,13 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si partSize = prevMaxPartSize } - // Part number always starts with '1'. - partNumber := 1 + // Part number always starts with '0'. + partNumber := 0 // Loop through until EOF. for { - // We have reached EOF, break out. - if totalUploadedSize == size { - break - } + // Increment part number. + partNumber++ // Initialize a new temporary file. tmpFile, err := newTempFile("multiparts$-putobject") @@ -273,15 +276,15 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si } // Calculates MD5 and Sha256 sum while copying partSize bytes into tmpFile. - md5Sum, sha256Sum, size, err := c.hashCopy(tmpFile, data, partSize) - if err != nil { - if err != io.EOF { - return 0, err + md5Sum, sha256Sum, size, rErr := c.hashCopy(tmpFile, data, partSize) + if rErr != nil { + if rErr != io.EOF { + return 0, rErr } } // Save all the part metadata. - partMdata := partMetadata{ + prtData := partData{ ReadCloser: tmpFile, Size: size, MD5Sum: md5Sum, @@ -289,39 +292,28 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si Number: partNumber, // Current part number to be uploaded. } - // If part number already uploaded, move to the next one. - if isPartUploaded(objectPart{ - ETag: hex.EncodeToString(partMdata.MD5Sum), + // If part not uploaded proceed to upload. + if !isPartUploaded(objectPart{ + ETag: hex.EncodeToString(prtData.MD5Sum), PartNumber: partNumber, }, partsInfo) { - // Close the read closer. - partMdata.ReadCloser.Close() - continue + // execute upload part. + objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) + if err != nil { + // Close the read closer. + prtData.ReadCloser.Close() + return 0, err + } + // Save successfully uploaded part metadata. + partsInfo[prtData.Number] = objPart } - // execute upload part. - objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata) - if err != nil { - // Close the read closer. - partMdata.ReadCloser.Close() - return totalUploadedSize, err - } + // Close the read closer. + prtData.ReadCloser.Close() - // Save successfully uploaded size. - totalUploadedSize += partMdata.Size - - // Save successfully uploaded part metadata. - partsInfo[partMdata.Number] = objPart - - // Move to next part. - partNumber++ - } - - // If size is greater than zero verify totalWritten. - // if totalWritten is different than the input 'size', do not complete the request throw an error. - if size > 0 { - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + // If read error was an EOF, break out of the loop. 
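+		// The final part, possibly shorter than partSize, was handled above before we exit.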
+		if rErr == io.EOF {
+			break
 		}
 	}
 
@@ -331,6 +323,21 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si
 		complPart.ETag = part.ETag
 		complPart.PartNumber = part.PartNumber
 		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+		// Save successfully uploaded size.
+		totalUploadedSize += part.Size
+	}
+
+	// If size is greater than zero verify totalUploadedSize. If totalUploadedSize is
+	// different than the input 'size', do not complete the request and throw an error.
+	if size > 0 {
+		if totalUploadedSize != size {
+			return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+		}
+	}
+
+	// If partNumber differs from the total number of uploaded parts, error out.
+	if partNumber != len(completeMultipartUpload.Parts) {
+		return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
 	}
 
 	// Sort all completed parts.
@@ -346,7 +353,7 @@ func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, si
 
 // putObject - add an object to a bucket.
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) {
+func (c Client) putObject(bucketName, objectName string, putObjData putObjectData) (ObjectStat, error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return ObjectStat{}, err
@@ -355,23 +362,23 @@ func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjec
 		return ObjectStat{}, err
 	}
 
-	if strings.TrimSpace(putObjMetadata.ContentType) == "" {
-		putObjMetadata.ContentType = "application/octet-stream"
+	if strings.TrimSpace(putObjData.ContentType) == "" {
+		putObjData.ContentType = "application/octet-stream"
 	}
 
 	// Set headers.
 	customHeader := make(http.Header)
-	customHeader.Set("Content-Type", putObjMetadata.ContentType)
+	customHeader.Set("Content-Type", putObjData.ContentType)
 
 	// Populate request metadata.
 	reqMetadata := requestMetadata{
 		bucketName:         bucketName,
 		objectName:         objectName,
 		customHeader:       customHeader,
-		contentBody:        putObjMetadata.ReadCloser,
-		contentLength:      putObjMetadata.Size,
-		contentSha256Bytes: putObjMetadata.Sha256Sum,
-		contentMD5Bytes:    putObjMetadata.MD5Sum,
+		contentBody:        putObjData.ReadCloser,
+		contentLength:      putObjData.Size,
+		contentSha256Bytes: putObjData.Sha256Sum,
+		contentMD5Bytes:    putObjData.MD5Sum,
 	}
 	// Initiate new request.
 	req, err := c.newRequest("PUT", reqMetadata)
@@ -389,11 +396,15 @@ func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjec
 			return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
 		}
 	}
+
 	var metadata ObjectStat
-	// Trim off the odd double quotes from ETag.
-	metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"")
+	// Trim off the odd double quotes from ETag in the beginning and end.
+	metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
 	// A success here means data was written to server successfully.
-	metadata.Size = putObjMetadata.Size
+	metadata.Size = putObjData.Size
+
+	// Return here.
 	return metadata, nil
 }
 
@@ -452,7 +463,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 }
 
 // uploadPart uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (objectPart, error) { +func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partData) (objectPart, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return objectPart{}, err @@ -496,8 +507,11 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPar } // Once successfully uploaded, return completed part. objPart := objectPart{} + objPart.Size = uploadingPart.Size objPart.PartNumber = uploadingPart.Number - objPart.ETag = resp.Header.Get("ETag") + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") return objPart, nil } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go index 16d87a70e..61931b0b3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go @@ -103,9 +103,6 @@ type objectPart struct { // Size of the uploaded part data. Size int64 - - // Error - Err error } // listObjectPartsResult container for ListObjectParts response. diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go index 9c5e96cf3..8a29bccd5 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go @@ -73,7 +73,12 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) { return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) } } - md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes + + // Trim off the odd double quotes from ETag in the beginning and end. + md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse content length. size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { return ObjectStat{}, ErrorResponse{ diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go index 886959de3..f7bd81097 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go @@ -54,6 +54,142 @@ func randString(n int, src rand.Source) string { return string(b[0:30]) } +func TestResumableFPutObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping resumable tests with short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "play.minio.io:9002", + "Q3AM3UQ867SPQQA43P2F", + "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(nil) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. 
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + n, _ := io.CopyN(file, crand.Reader, 11*1024*1024) + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + objectName := bucketName + "-resumable" + + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + file.Close() + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } +} + +func TestResumablePutObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping resumable tests with short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "play.minio.io:9002", + "Q3AM3UQ867SPQQA43P2F", + "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(nil) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // generate 11MB + buf := make([]byte, 11*1024*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "-resumable" + reader := bytes.NewReader(buf) + n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + func TestGetObjectPartialFunctional(t *testing.T) { // Seed random based on current time. 
rand.Seed(time.Now().Unix()) @@ -177,6 +313,14 @@ func TestGetObjectPartialFunctional(t *testing.T) { t.Fatal("Error:", err, len(buf6)) } } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } } func TestFunctional(t *testing.T) { @@ -271,14 +415,26 @@ func TestFunctional(t *testing.T) { // generate data buf := make([]byte, rand.Intn(1<<19)) - reader := bytes.NewReader(buf) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } - n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "") if err != nil { t.Fatal("Error: ", err) } if n != int64(len(buf)) { - t.Fatal("Error: bad length ", n, reader.Len()) + t.Fatal("Error: bad length ", n, len(buf)) + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), -1, "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName+"-nolength") + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) } newReader, _, err := c.GetObject(bucketName, objectName) @@ -333,6 +489,10 @@ func TestFunctional(t *testing.T) { t.Fatal("Error: ", err) } buf = make([]byte, rand.Intn(1<<20)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf)) if err != nil { t.Fatal("Error: ", err) @@ -365,25 +525,25 @@ func TestFunctional(t *testing.T) { if err != nil { t.Fatal("Error: ", err) } + err = c.RemoveObject(bucketName, objectName+"-nolength") + if err != nil { + t.Fatal("Error: ", err) + } err = c.RemoveObject(bucketName, objectName+"-presigned") if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveBucket(bucketName) if err != nil { t.Fatal("Error:", err) } - err = c.RemoveBucket("bucket1") if err == nil { t.Fatal("Error:") } - if err.Error() != "The specified bucket does not exist." { t.Fatal("Error: ", err) } - if err = os.Remove(fileName); err != nil { t.Fatal("Error: ", err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml index 963698a04..444696bc5 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml @@ -27,7 +27,7 @@ build_script: - golint github.com/minio/minio-go... - deadcode - go test - - go test -race + - go test -test.short -race # to disable automatic tests test: off diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go index 34508569f..e9fada3e6 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go @@ -19,7 +19,6 @@ package minio import ( "io/ioutil" "os" - "path/filepath" "sync" ) @@ -42,21 +41,6 @@ func newTempFile(prefix string) (*tempFile, error) { }, nil } -// cleanupStaleTempFiles - cleanup any stale files present in temp directory at a prefix. 
-func cleanupStaleTempfiles(prefix string) error { - globPath := filepath.Join(os.TempDir(), prefix) + "*" - staleFiles, err := filepath.Glob(globPath) - if err != nil { - return err - } - for _, staleFile := range staleFiles { - if err := os.Remove(staleFile); err != nil { - return err - } - } - return nil -} - // Close - closer wrapper to close and remove temporary file. func (t *tempFile) Close() error { t.mutex.Lock() From 5722ccfcdaf7104fa683809672397a0f41ac0227 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 14:38:45 +0100 Subject: [PATCH 40/55] Fix s3 backend, add more tests --- backend/backend_test.go | 114 ++++++++++++++++++++++++++++++---------- backend/s3/s3.go | 37 ++++++++----- backend/s3/s3_test.go | 7 +++ 3 files changed, 118 insertions(+), 40 deletions(-) create mode 100644 backend/s3/s3_test.go diff --git a/backend/backend_test.go b/backend/backend_test.go index f38fbced5..eddc016b8 100644 --- a/backend/backend_test.go +++ b/backend/backend_test.go @@ -5,9 +5,12 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "sort" "testing" + crand "crypto/rand" + "github.com/restic/restic/backend" . "github.com/restic/restic/test" ) @@ -37,6 +40,70 @@ func testBackendConfig(b backend.Backend, t *testing.T) { } } +func testGetReader(b backend.Backend, t testing.TB) { + length := rand.Intn(1<<23) + 2000 + + data := make([]byte, length) + _, err := io.ReadFull(crand.Reader, data) + OK(t, err) + + blob, err := b.Create() + OK(t, err) + + id := backend.Hash(data) + + _, err = blob.Write([]byte(data)) + OK(t, err) + OK(t, blob.Finalize(backend.Data, id.String())) + + for i := 0; i < 500; i++ { + l := rand.Intn(length + 2000) + o := rand.Intn(length + 2000) + + d := data + if o < len(d) { + d = d[o:] + } else { + o = len(d) + d = d[:0] + } + + if l > 0 && l < len(d) { + d = d[:l] + } + + rd, err := b.GetReader(backend.Data, id.String(), uint(o), uint(l)) + OK(t, err) + buf, err := ioutil.ReadAll(rd) + OK(t, err) + + if !bytes.Equal(buf, d) { + t.Fatalf("data not equal") + } + } + + OK(t, b.Remove(backend.Data, id.String())) +} + +func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { + id := backend.Hash(data) + + blob, err := b.Create() + OK(t, err) + + _, err = blob.Write([]byte(data)) + OK(t, err) + OK(t, blob.Finalize(tpe, id.String())) +} + +func read(t testing.TB, rd io.Reader, expectedData []byte) { + buf, err := ioutil.ReadAll(rd) + OK(t, err) + if expectedData != nil { + Equals(t, expectedData, buf) + } +} + func testBackend(b backend.Backend, t *testing.T) { testBackendConfig(b, t) @@ -70,41 +137,34 @@ func testBackend(b backend.Backend, t *testing.T) { // add files for _, test := range TestStrings { - // store string in backend - blob, err := b.Create() - OK(t, err) + store(t, b, tpe, []byte(test.data)) - _, err = blob.Write([]byte(test.data)) - OK(t, err) - OK(t, blob.Finalize(tpe, test.id)) - - // try to get it out again + // test Get() rd, err := b.Get(tpe, test.id) OK(t, err) Assert(t, rd != nil, "Get() returned nil") - // try to read it out again - reader, err := b.GetReader(tpe, test.id, 0, uint(len(test.data))) + read(t, rd, []byte(test.data)) + OK(t, rd.Close()) + + // test GetReader() + rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data))) OK(t, err) - Assert(t, reader != nil, "GetReader() returned nil") - bytes := make([]byte, len(test.data)) - reader.Read(bytes) - Assert(t, test.data == string(bytes), "Read() returned different content") + Assert(t, rd != nil, "GetReader() returned nil") + + read(t, rd, 
[]byte(test.data)) + OK(t, rd.Close()) // try to read it out with an offset and a length - readerOffLen, err := b.GetReader(tpe, test.id, 1, uint(len(test.data)-2)) + start := 1 + end := len(test.data) - 2 + length := end - start + rd, err = b.GetReader(tpe, test.id, uint(start), uint(length)) OK(t, err) - Assert(t, readerOffLen != nil, "GetReader() returned nil") - bytesOffLen := make([]byte, len(test.data)-2) - readerOffLen.Read(bytesOffLen) - Assert(t, test.data[1:len(test.data)-1] == string(bytesOffLen), "Read() with offset and length returned different content") + Assert(t, rd != nil, "GetReader() returned nil") - buf, err := ioutil.ReadAll(rd) - OK(t, err) - Equals(t, test.data, string(buf)) - - // compare content - Equals(t, test.data, string(buf)) + read(t, rd, []byte(test.data[start:end])) + OK(t, rd.Close()) } // test adding the first file again @@ -161,7 +221,6 @@ func testBackend(b backend.Backend, t *testing.T) { found, err := b.Test(tpe, id.String()) OK(t, err) - Assert(t, found, fmt.Sprintf("id %q was not found before removal", id)) OK(t, b.Remove(tpe, id.String())) @@ -170,6 +229,7 @@ func testBackend(b backend.Backend, t *testing.T) { Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) } } - } + + testGetReader(b, t) } diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 6ee309112..32dd2fbd6 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "errors" + "fmt" "io" "strings" @@ -116,14 +117,22 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { return errors.New("key already exists") } + expectedBytes := bb.buf.Len() + <-bb.b.connChan - _, err = bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream") + n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream") bb.b.connChan <- struct{}{} - bb.buf.Reset() - debug.Log("s3.Finalize", "finalized %v -> err %v", path, err) + debug.Log("s3.Finalize", "finalized %v -> n %v, err %v", path, n, err) + if err != nil { + return err + } - return err + if n != int64(expectedBytes) { + return errors.New("could not store all bytes") + } + + return nil } // Create creates a new Blob. The data is available only after Finalize() @@ -160,24 +169,26 @@ func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) l, o := int64(length), int64(offset) if l == 0 { - l = stat.Size - o + l = stat.Size } - if l > stat.Size-o { + if o > stat.Size { + return nil, fmt.Errorf("offset beyond end of file (%v > %v)", o, stat.Size) + } + + if o+l > stat.Size { l = stat.Size - o } debug.Log("s3.GetReader", "%v %v, o %v l %v", t, name, o, l) - buf := make([]byte, l) - n, err := rd.ReadAt(buf, o) - debug.Log("s3.GetReader", " -> n %v err %v", n, err) - if err == io.EOF && int64(n) == l { - debug.Log("s3.GetReader", " ignoring EOF error") - err = nil + var r io.Reader + r = &ContinuousReader{R: rd, Offset: o} + if length > 0 { + r = io.LimitReader(r, int64(length)) } - return backend.ReadCloser(bytes.NewReader(buf[:n])), err + return backend.ReadCloser(r), nil } // Test returns true if a blob of the given type and name exists in the backend. 
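
Note: the rewritten GetReader above streams the requested range through a ContinuousReader wrapped in io.LimitReader instead of buffering the whole blob in memory. The ContinuousReader type itself is not part of this hunk, so the following is only a sketch, assuming the adapter merely turns the object's io.ReaderAt into an io.Reader that starts at Offset:

package s3

import "io"

// ContinuousReader adapts an io.ReaderAt into an io.Reader that begins
// reading at Offset and advances it after every call. (Sketch only; the
// real definition is not shown in this patch.)
type ContinuousReader struct {
	R      io.ReaderAt
	Offset int64
}

func (c *ContinuousReader) Read(p []byte) (int, error) {
	n, err := c.R.ReadAt(p, c.Offset)
	c.Offset += int64(n)
	// ReadAt may return io.EOF together with data; io.Reader semantics
	// allow forwarding both to the caller.
	return n, err
}
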
diff --git a/backend/s3/s3_test.go b/backend/s3/s3_test.go new file mode 100644 index 000000000..289748485 --- /dev/null +++ b/backend/s3/s3_test.go @@ -0,0 +1,7 @@ +package s3 + +import "testing" + +func TestGetReader(t *testing.T) { + +} From 338ad422738d31623b550c7a3bbe764931f03ed6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 15:33:17 +0100 Subject: [PATCH 41/55] location: fix tests --- location/location_test.go | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/location/location_test.go b/location/location_test.go index 702b2651e..b0303fad1 100644 --- a/location/location_test.go +++ b/location/location_test.go @@ -46,26 +46,33 @@ var parseTests = []struct { {"s3://eu-central-1/bucketname", Location{Scheme: "s3", Config: s3.Config{ - Region: "eu-central-1", - Bucket: "bucketname", + Endpoint: "eu-central-1", + Bucket: "bucketname", }}, }, {"s3://hostname.foo/bucketname", Location{Scheme: "s3", Config: s3.Config{ - Region: "hostname.foo", - Bucket: "bucketname", + Endpoint: "hostname.foo", + Bucket: "bucketname", }}, }, {"s3:eu-central-1/repo", Location{Scheme: "s3", Config: s3.Config{ - Region: "eu-central-1", - Bucket: "repo", + Endpoint: "eu-central-1", + Bucket: "repo", }}, }, {"s3:https://hostname.foo/repo", Location{Scheme: "s3", Config: s3.Config{ - URL: "https://hostname.foo", - Bucket: "repo", + Endpoint: "hostname.foo", + Bucket: "repo", + }}, + }, + {"s3:http://hostname.foo/repo", Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "repo", + UseHTTP: true, }}, }, } From 26eb85966349d29e330af2bd7b053b6c11b56fd1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 15:33:25 +0100 Subject: [PATCH 42/55] Dockerfile: Add sftp server binary --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ca00c649f..e2810584d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,7 +20,7 @@ ARG GOARCH=amd64 # install dependencies RUN apt-get update -RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential +RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential openssh-server # add and configure user ENV HOME /home/travis From fc347ba60f8e8bea8dd6bf5be1a938341c465441 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 15:33:38 +0100 Subject: [PATCH 43/55] Add new test with multiple writes for backends --- backend/backend_test.go | 50 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/backend/backend_test.go b/backend/backend_test.go index eddc016b8..66b7b3ea3 100644 --- a/backend/backend_test.go +++ b/backend/backend_test.go @@ -41,7 +41,7 @@ func testBackendConfig(b backend.Backend, t *testing.T) { } func testGetReader(b backend.Backend, t testing.TB) { - length := rand.Intn(1<<23) + 2000 + length := rand.Intn(1<<24) + 2000 data := make([]byte, length) _, err := io.ReadFull(crand.Reader, data) @@ -85,6 +85,53 @@ func testGetReader(b backend.Backend, t testing.TB) { OK(t, b.Remove(backend.Data, id.String())) } +func testWrite(b backend.Backend, t testing.TB) { + length := rand.Intn(1<<23) + 2000 + + data := make([]byte, length) + _, err := io.ReadFull(crand.Reader, data) + OK(t, err) + id := backend.Hash(data) + + for i := 0; i < 10; i++ { + blob, err := b.Create() + OK(t, err) + + o := 0 + for o < len(data) { + l := rand.Intn(len(data) - o) + if len(data)-o < 20 { + l = len(data) - o + } + + n, err := 
blob.Write(data[o : o+l]) + OK(t, err) + if n != l { + t.Fatalf("wrong number of bytes written, want %v, got %v", l, n) + } + + o += l + } + + name := fmt.Sprintf("%s-%d", id, i) + OK(t, blob.Finalize(backend.Data, name)) + + rd, err := b.Get(backend.Data, name) + OK(t, err) + + buf, err := ioutil.ReadAll(rd) + OK(t, err) + + if len(buf) != len(data) { + t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("data not equal") + } + } +} + func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { id := backend.Hash(data) @@ -232,4 +279,5 @@ func testBackend(b backend.Backend, t *testing.T) { } testGetReader(b, t) + testWrite(b, t) } From 69e6e9e5c7efcbe72397b5871edc006ed1eae30e Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 2 Jan 2016 18:25:37 +0100 Subject: [PATCH 44/55] Update s3 library (again) --- Godeps/Godeps.json | 4 ++-- .../src/github.com/minio/minio-go/constants.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 97d89996a..8ec11491e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-195-gf30b6ca", - "Rev": "f30b6ca90bfda7578f6a11b7ba6af2eae7b0510c" + "Comment": "v0.2.5-197-g45a4b10", + "Rev": "45a4b10109a2313378515d89cd3be55ff58c11c2" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go index 617621298..f4978019f 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go @@ -20,19 +20,19 @@ package minio // minimumPartSize - minimum part size 5MiB per object after which // putObject behaves internally as multipart. -var minimumPartSize int64 = 1024 * 1024 * 5 +const minimumPartSize = 1024 * 1024 * 5 // maxParts - maximum parts for a single multipart session. -var maxParts = int64(10000) +const maxParts = 10000 // maxPartSize - maximum part size 5GiB for a single multipart upload operation. -var maxPartSize int64 = 1024 * 1024 * 1024 * 5 +const maxPartSize = 1024 * 1024 * 1024 * 5 // maxSinglePutObjectSize - maximum size 5GiB of object per PUT operation. -var maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 // maxMultipartPutObjectSize - maximum size 5TiB of object for Multipart operation. -var maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // optimalReadAtBufferSize - optimal buffer 5MiB used for reading through ReadAt operation. 
-var optimalReadAtBufferSize = 1024 * 1024 * 5
+const optimalReadAtBufferSize = 1024 * 1024 * 5

From 314182e7e06b85297a329a7f26ec80cb9d0259f1 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sun, 3 Jan 2016 21:46:07 +0100
Subject: [PATCH 45/55] Add debug, do not create bucket if it already exists

---
 backend/s3/s3.go | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 32dd2fbd6..aca879698 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"os"
 	"strings"
 
 	"github.com/minio/minio-go"
@@ -43,21 +44,17 @@ func Open(cfg Config) (backend.Backend, error) {
 	be := &S3Backend{client: client, bucketname: cfg.Bucket}
 	be.createConnections()
 
-	// create new bucket with default ACL in default region
-	err = client.MakeBucket(cfg.Bucket, "", "")
+	if err := client.BucketExists(cfg.Bucket); err != nil {
+		debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
 
-	if err != nil {
-		e, ok := err.(minio.ErrorResponse)
-		if ok && e.Code == "BucketAlreadyExists" {
-			debug.Log("s3.Open", "ignoring error that bucket %q already exists", cfg.Bucket)
-			err = nil
+		// create new bucket with default ACL in default region
+		err = client.MakeBucket(cfg.Bucket, "", "")
+
+		if err != nil {
+			return nil, err
 		}
 	}
 
-	if err != nil {
-		return nil, err
-	}
-
 	return be, nil
 }
 
@@ -103,6 +100,7 @@ func (bb *s3Blob) Size() uint {
 }
 
 func (bb *s3Blob) Finalize(t backend.Type, name string) error {
+	debug.Log("s3.blob.Finalize()", "bucket %v, finalize %v, %d bytes", bb.b.bucketname, name, bb.buf.Len())
 	if bb.final {
 		return errors.New("Already finalized")
 	}
@@ -114,16 +112,19 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error {
 	// Check key does not already exist
 	_, err := bb.b.client.StatObject(bb.b.bucketname, path)
 	if err == nil {
+		debug.Log("s3.blob.Finalize()", "%v already exists", name)
 		return errors.New("key already exists")
 	}
 
 	expectedBytes := bb.buf.Len()
 
 	<-bb.b.connChan
+	debug.Log("s3.Finalize", "PutObject(%v, %v, %v, %v)",
+		bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream")
 	n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream")
+	debug.Log("s3.Finalize", "finalized %v -> n %v, err %#v", path, n, err)
 	bb.b.connChan <- struct{}{}
 
-	debug.Log("s3.Finalize", "finalized %v -> n %v, err %v", path, n, err)
 	if err != nil {
 		return err
 	}
 
@@ -216,6 +217,7 @@ func (be *S3Backend) Remove(t backend.Type, name string) error {
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { + debug.Log("s3.List", "listing %v", t) ch := make(chan string) prefix := s3path(t, "") From 61e66e936f121c638569afc718e6f144d6ef940d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 3 Jan 2016 21:48:57 +0100 Subject: [PATCH 46/55] Fix imports --- backend/s3/s3.go | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index aca879698..6872313c5 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "os" "strings" "github.com/minio/minio-go" From 181480b68b246671cf676096f68c5d80d7ddeece Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 3 Jan 2016 21:50:59 +0100 Subject: [PATCH 47/55] Update s3 library --- Godeps/Godeps.json | 4 ++-- .../src/github.com/minio/minio-go/.travis.yml | 13 +++++++++++++ .../src/github.com/minio/minio-go/api-put-bucket.go | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 8ec11491e..ec42f905f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-197-g45a4b10", - "Rev": "45a4b10109a2313378515d89cd3be55ff58c11c2" + "Comment": "v0.2.5-201-g410319e", + "Rev": "410319e0e39a372998f4d9cd2b9da4ff243ae388" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml index c55421487..96a4e2a47 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml @@ -1,9 +1,22 @@ +sudo: false language: go + +os: +- linux +- osx + +env: +- ARCH=x86_64 +- ARCH=i686 + go: - 1.5.1 +- 1.5.2 + script: - go vet ./... - go test -test.short -race -v ./... + notifications: slack: secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8= diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go index 6293e64f3..e1880d9f8 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go @@ -102,7 +102,7 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support this. if isVirtualHostSupported(c.endpointURL) { - targetURL.Host = bucketName + "/" + c.endpointURL.Host + targetURL.Host = bucketName + "." + c.endpointURL.Host targetURL.Path = "/" } else { // If not fall back to using path style. 
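
Note on the one-character fix in makeBucketRequest above: with virtual-host-style addressing the bucket becomes a subdomain of the endpoint rather than a path element of the host. A tiny illustration, using a hypothetical endpoint and bucket name:

package main

import "fmt"

func main() {
	bucket, host := "bucketname", "s3.amazonaws.com" // hypothetical values
	fmt.Println(bucket + "/" + host) // before: "bucketname/s3.amazonaws.com", a malformed host
	fmt.Println(bucket + "." + host) // after:  "bucketname.s3.amazonaws.com", virtual-host style
}
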
From 0e9236475b6bd55683d78bd59750ef7d3f1fb918 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 7 Jan 2016 20:23:38 +0100 Subject: [PATCH 48/55] Update s3 library (again) --- Godeps/Godeps.json | 4 +- .../src/github.com/minio/minio-go/.travis.yml | 2 +- .../src/github.com/minio/minio-go/README.md | 10 +- .../minio/minio-go/api-definitions.go | 57 +- .../minio/minio-go/api-error-response.go | 93 ++- ...-fget-object.go => api-get-object-file.go} | 2 +- .../src/github.com/minio/minio-go/api-get.go | 343 +++++--- .../src/github.com/minio/minio-go/api-list.go | 109 ++- .../minio/minio-go/api-presigned.go | 11 +- .../minio/minio-go/api-put-bucket.go | 4 +- ...-fput-object.go => api-put-object-file.go} | 227 +++--- .../minio-go/api-put-object-multipart.go | 421 ++++++++++ .../minio/minio-go/api-put-object-partial.go | 378 --------- .../minio/minio-go/api-put-object-readat.go | 196 +++++ .../minio/minio-go/api-put-object.go | 529 +++--------- .../github.com/minio/minio-go/api-remove.go | 51 +- .../minio/minio-go/api-s3-definitions.go | 66 +- .../src/github.com/minio/minio-go/api-stat.go | 25 +- .../src/github.com/minio/minio-go/api.go | 129 ++- .../minio/minio-go/api_functional_v2_test.go | 751 ++++++++++++++++++ ...onal_test.go => api_functional_v4_test.go} | 340 ++++++-- .../{api_private_test.go => api_unit_test.go} | 117 ++- .../github.com/minio/minio-go/appveyor.yml | 4 +- .../github.com/minio/minio-go/bucket-acl.go | 14 +- .../github.com/minio/minio-go/bucket-cache.go | 31 +- .../minio/minio-go/common-methods.go | 52 -- .../github.com/minio/minio-go/constants.go | 12 +- .../minio/minio-go/examples/play/getobject.go | 26 +- .../examples/play/getobjectpartial.go | 91 --- .../minio/minio-go/examples/play/putobject.go | 3 +- .../examples/play/putobjectpartial.go | 56 -- .../minio/minio-go/examples/s3/getobject.go | 14 +- .../minio-go/examples/s3/getobjectpartial.go | 92 --- .../minio/minio-go/examples/s3/putobject.go | 3 +- .../minio-go/examples/s3/putobjectpartial.go | 57 -- .../github.com/minio/minio-go/post-policy.go | 57 +- .../minio/minio-go/request-signature-v2.go | 40 +- .../minio/minio-go/request-signature-v4.go | 87 +- .../minio/minio-go/signature-type.go | 16 + .../src/github.com/minio/minio-go/tempfile.go | 2 +- .../src/github.com/minio/minio-go/utils.go | 24 +- 41 files changed, 2793 insertions(+), 1753 deletions(-) rename Godeps/_workspace/src/github.com/minio/minio-go/{api-fget-object.go => api-get-object-file.go} (97%) rename Godeps/_workspace/src/github.com/minio/minio-go/{api-fput-object.go => api-put-object-file.go} (67%) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go rename Godeps/_workspace/src/github.com/minio/minio-go/{api_functional_test.go => api_functional_v4_test.go} (62%) rename Godeps/_workspace/src/github.com/minio/minio-go/{api_private_test.go => api_unit_test.go} (60%) delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go delete mode 100644 
Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go delete mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ec42f905f..03b9fcb05 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-201-g410319e", - "Rev": "410319e0e39a372998f4d9cd2b9da4ff243ae388" + "Comment": "v0.2.5-205-g38be406", + "Rev": "38be40605dc37d2d7ec06169218365b46ae33e4b" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml index 96a4e2a47..f76844876 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/.travis.yml @@ -15,7 +15,7 @@ go: script: - go vet ./... -- go test -test.short -race -v ./... +- go test -short -race -v ./... notifications: slack: diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/README.md b/Godeps/_workspace/src/github.com/minio/minio-go/README.md index 5417d8f14..e32bf6f5f 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/README.md +++ b/Godeps/_workspace/src/github.com/minio/minio-go/README.md @@ -67,14 +67,14 @@ func main() { * [RemoveBucket(bucketName) error](examples/s3/removebucket.go) * [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go) * [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go) -* [ListBuckets() []BucketStat](examples/s3/listbuckets.go) -* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectStat](examples/s3/listobjects.go) -* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go) +* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go) +* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go) +* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go) ### Object Operations. * [PutObject(bucketName, objectName, io.Reader, size, contentType) error](examples/s3/putobject.go) -* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectStat, error)](examples/s3/getobject.go) -* [StatObject(bucketName, objectName) (ObjectStat, error)](examples/s3/statobject.go) +* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectInfo, error)](examples/s3/getobject.go) +* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go) * [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go) * [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go) diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go index 123de1850..fd0a613a8 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go @@ -16,26 +16,27 @@ package minio -import ( - "io" - "time" -) +import "time" -// BucketStat container for bucket metadata. -type BucketStat struct { +// BucketInfo container for bucket metadata. +type BucketInfo struct { // The name of the bucket. Name string // Date the bucket was created. 
CreationDate time.Time } -// ObjectStat container for object metadata. -type ObjectStat struct { - ETag string - Key string - LastModified time.Time - Size int64 - ContentType string +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string + + Key string // Name of the object + LastModified time.Time // Date and time the object was last modified. + Size int64 // Size in bytes of the object. + ContentType string // A standard MIME type describing the format of the object data. // Owner name. Owner struct { @@ -50,18 +51,21 @@ type ObjectStat struct { Err error } -// ObjectMultipartStat container for multipart object metadata. -type ObjectMultipartStat struct { +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { // Date and time at which the multipart upload was initiated. Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` Initiator initiator Owner owner + // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass string // Key of the object for which the multipart upload was initiated. - Key string + Key string + + // Size in bytes of the object. Size int64 // Upload ID that identifies the multipart upload. @@ -70,24 +74,3 @@ type ObjectMultipartStat struct { // Error Err error } - -// partData - container for each part. -type partData struct { - MD5Sum []byte - Sha256Sum []byte - ReadCloser io.ReadCloser - Size int64 - Number int // partData number. - - // Error - Err error -} - -// putObjectData - container for each single PUT operation. -type putObjectData struct { - MD5Sum []byte - Sha256Sum []byte - ReadCloser io.ReadCloser - Size int64 - ContentType string -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go index 4d7e30fc1..ca15164f9 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go @@ -17,7 +17,6 @@ package minio import ( - "encoding/json" "encoding/xml" "fmt" "net/http" @@ -36,7 +35,7 @@ import ( */ -// ErrorResponse is the type error returned by some API operations. +// ErrorResponse - Is the typed error returned by all API operations. type ErrorResponse struct { XMLName xml.Name `xml:"Error" json:"-"` Code string @@ -46,12 +45,13 @@ type ErrorResponse struct { RequestID string `xml:"RequestId"` HostID string `xml:"HostId"` - // This is a new undocumented field, set only if available. + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. AmzBucketRegion string } -// ToErrorResponse returns parsed ErrorResponse struct, if input is nil or not ErrorResponse return value is nil -// this fuction is useful when some one wants to dig deeper into the error structures over the network. +// ToErrorResponse - Returns parsed ErrorResponse struct from body and +// http headers. // // For example: // @@ -61,7 +61,6 @@ type ErrorResponse struct { // reader, stat, err := s3.GetObject(...) // if err != nil { // resp := s3.ToErrorResponse(err) -// fmt.Println(resp.ToXML()) // } // ... 
func ToErrorResponse(err error) ErrorResponse { @@ -73,47 +72,32 @@ func ToErrorResponse(err error) ErrorResponse { } } -// ToXML send raw xml marshalled as string -func (e ErrorResponse) ToXML() string { - b, err := xml.Marshal(&e) - if err != nil { - panic(err) - } - return string(b) -} - -// ToJSON send raw json marshalled as string -func (e ErrorResponse) ToJSON() string { - b, err := json.Marshal(&e) - if err != nil { - panic(err) - } - return string(b) -} - -// Error formats HTTP error string +// Error - Returns HTTP error string func (e ErrorResponse) Error() string { return e.Message } -// Common reporting string +// Common string for errors to report issue location in unexpected +// cases. const ( reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." ) -// HTTPRespToErrorResponse returns a new encoded ErrorResponse structure +// HTTPRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { if resp == nil { msg := "Response is empty. " + reportIssue return ErrInvalidArgument(msg) } - var errorResponse ErrorResponse - err := xmlDecoder(resp.Body, &errorResponse) + var errResp ErrorResponse + err := xmlDecoder(resp.Body, &errResp) + // Xml decoding failed with no body, fall back to HTTP headers. if err != nil { switch resp.StatusCode { case http.StatusNotFound: if objectName == "" { - errorResponse = ErrorResponse{ + errResp = ErrorResponse{ Code: "NoSuchBucket", Message: "The specified bucket does not exist.", BucketName: bucketName, @@ -122,7 +106,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), } } else { - errorResponse = ErrorResponse{ + errResp = ErrorResponse{ Code: "NoSuchKey", Message: "The specified key does not exist.", BucketName: bucketName, @@ -133,7 +117,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) } } case http.StatusForbidden: - errorResponse = ErrorResponse{ + errResp = ErrorResponse{ Code: "AccessDenied", Message: "Access Denied.", BucketName: bucketName, @@ -143,7 +127,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), } case http.StatusConflict: - errorResponse = ErrorResponse{ + errResp = ErrorResponse{ Code: "Conflict", Message: "Bucket not empty.", BucketName: bucketName, @@ -152,7 +136,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), } default: - errorResponse = ErrorResponse{ + errResp = ErrorResponse{ Code: resp.Status, Message: resp.Status, BucketName: bucketName, @@ -162,10 +146,21 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) } } } - return errorResponse + + // AccessDenied without a signature mismatch code, usually means + // that the bucket policy has certain restrictions where some API + // operations are not allowed. Handle this case so that top level + // callers can interpret this easily and fall back if needed to a + // lower functionality call. Read each individual API specific + // code for such fallbacks. + if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" { + errResp.Code = "NotImplemented" + errResp.Message = "Operation is not allowed according to your bucket policy." 
+ } + return errResp } -// ErrEntityTooLarge input size is larger than supported maximum. +// ErrEntityTooLarge - Input size is larger than supported maximum. func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size '5GiB' for single PUT operation.", totalSize) return ErrorResponse{ @@ -176,7 +171,19 @@ func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error { } } -// ErrUnexpectedShortRead unexpected shorter read of input buffer from target. +// ErrEntityTooSmall - Input size is smaller than supported minimum. +func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize) + return ErrorResponse{ + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedShortRead - Unexpected shorter read of input buffer from +// target. func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Data read ‘%s’ is shorter than the size ‘%s’ of input buffer.", strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) @@ -188,7 +195,7 @@ func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName s } } -// ErrUnexpectedEOF unexpected end of file reached. +// ErrUnexpectedEOF - Unexpected end of file reached. func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.", strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) @@ -200,7 +207,7 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) } } -// ErrInvalidBucketName - invalid bucket name response. +// ErrInvalidBucketName - Invalid bucket name response. func ErrInvalidBucketName(message string) error { return ErrorResponse{ Code: "InvalidBucketName", @@ -209,7 +216,7 @@ func ErrInvalidBucketName(message string) error { } } -// ErrInvalidObjectName - invalid object name response. +// ErrInvalidObjectName - Invalid object name response. func ErrInvalidObjectName(message string) error { return ErrorResponse{ Code: "NoSuchKey", @@ -218,7 +225,7 @@ func ErrInvalidObjectName(message string) error { } } -// ErrInvalidParts - invalid number of parts. +// ErrInvalidParts - Invalid number of parts. func ErrInvalidParts(expectedParts, uploadedParts int) error { msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts) return ErrorResponse{ @@ -228,11 +235,11 @@ func ErrInvalidParts(expectedParts, uploadedParts int) error { } } -// ErrInvalidObjectPrefix - invalid object prefix response is +// ErrInvalidObjectPrefix - Invalid object prefix response is // similar to object name response. var ErrInvalidObjectPrefix = ErrInvalidObjectName
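The AccessDenied-to-NotImplemented remapping above is what lets higher-level calls degrade gracefully when a bucket policy forbids an API. A sketch of the intended fallback pattern (the same pattern FPutObject uses later in this patch; reader and size are assumed to be in scope):

n, err := c.putObjectMultipart(bucketName, objectName, reader, size, contentType)
if err != nil {
	if ToErrorResponse(err).Code == "NotImplemented" {
		// Multipart is not allowed by the bucket policy; retry as a single PUT.
		return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
	}
	return n, err
}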
-// ErrInvalidArgument - invalid argument response. +// ErrInvalidArgument - Invalid argument response. func ErrInvalidArgument(message string) error { return ErrorResponse{ Code: "InvalidArgument", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get-object-file.go similarity index 97% rename from Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go rename to Godeps/_workspace/src/github.com/minio/minio-go/api-get-object-file.go index ee96a6cb9..b73ed2cb3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get-object-file.go @@ -22,7 +22,7 @@ import ( "path/filepath" ) -// FGetObject - get object to a file. +// FGetObject - download contents of an object to a local file. func (c Client) FGetObject(bucketName, objectName, filePath string) error { // Input validation. if err := isValidBucketName(bucketName); err != nil { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go index 7596278af..46643a5c7 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go @@ -28,15 +28,18 @@ import ( "time" ) -// GetBucketACL get the permissions on an existing bucket. +// GetBucketACL - Get the permissions on an existing bucket. // // Returned values are: // -// private - owner gets full access. -// public-read - owner gets full access, others get read access. -// public-read-write - owner gets full access, others get full access too. -// authenticated-read - owner gets full access, authenticated users get read access. +// private - Owner gets full access. +// public-read - Owner gets full access, others get read access. +// public-read-write - Owner gets full access, others get full access +// too. +// authenticated-read - Owner gets full access, authenticated users +// get read access. func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { + // Input validation. if err := isValidBucketName(bucketName); err != nil { return "", err } @@ -73,9 +76,10 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { return "", err } - // We need to avoid following de-serialization check for Google Cloud Storage. - // On Google Cloud Storage "private" canned ACL's policy do not have grant list. - // Treat it as a valid case, check for all other vendors. + // We need to avoid following de-serialization check for Google + // Cloud Storage. On Google Cloud Storage a "private" canned ACL's + // policy does not have a grant list. Treat it as a valid case, check + // for all other vendors. if !isGoogleEndpoint(c.endpointURL) { if policy.AccessControlList.Grant == nil { errorResponse := ErrorResponse{ @@ -90,8 +94,8 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { } } - // boolean cues to indentify right canned acls. - var publicRead, publicWrite bool + // Boolean cues to identify right canned acls. + var publicRead, publicWrite, authenticatedRead bool // Handle grants.
grants := policy.AccessControlList.Grant @@ -100,7 +104,8 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { continue } if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return BucketACL("authenticated-read"), nil + authenticatedRead = true + break } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { publicWrite = true } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { @@ -108,15 +113,19 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { } } - // public write and not enabled. return. + // Verify if acl is authenticated read. + if authenticatedRead { + return BucketACL("authenticated-read"), nil + } + // Verify if acl is private. if !publicWrite && !publicRead { return BucketACL("private"), nil } - // public write not enabled but public read is. return. + // Verify if acl is public-read. if !publicWrite && publicRead { return BucketACL("public-read"), nil } - // public read and public write are enabled return. + // Verify if acl is public-read-write. if publicRead && publicWrite { return BucketACL("public-read-write"), nil } @@ -129,47 +138,30 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { } } -// GetObject gets object content from specified bucket. -// You may also look at GetPartialObject. -func (c Client) GetObject(bucketName, objectName string) (io.ReadCloser, ObjectStat, error) { +// GetObject - returns a seekable, readable object. +func (c Client) GetObject(bucketName, objectName string) (*Object, error) { + // Input validation. if err := isValidBucketName(bucketName); err != nil { - return nil, ObjectStat{}, err + return nil, err } if err := isValidObjectName(objectName); err != nil { - return nil, ObjectStat{}, err + return nil, err } - // get the whole object as a stream, no seek or resume supported for this. - return c.getObject(bucketName, objectName, 0, 0) -} - -// ReadAtCloser readat closer interface. -type ReadAtCloser interface { - io.ReaderAt - io.Closer -} - -// GetObjectPartial returns a io.ReadAt for reading sparse entries. -func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, ObjectStat, error) { - if err := isValidBucketName(bucketName); err != nil { - return nil, ObjectStat{}, err - } - if err := isValidObjectName(objectName); err != nil { - return nil, ObjectStat{}, err - } - // Send an explicit stat to get the actual object size. - objectStat, err := c.StatObject(bucketName, objectName) + // Send an explicit stat call to get the actual object info. + objectInfo, err := c.StatObject(bucketName, objectName) if err != nil { - return nil, ObjectStat{}, err + return nil, err } // Create request channel. - reqCh := make(chan readAtRequest) + reqCh := make(chan readRequest) // Create response channel. - resCh := make(chan readAtResponse) + resCh := make(chan readResponse) // Create done channel. doneCh := make(chan struct{}) - // This routine feeds partial object data as and when the caller reads. + // This routine feeds partial object data as and when the caller + // reads. go func() { defer close(reqCh) defer close(resCh) @@ -185,21 +177,21 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O // Get shortest length. // NOTE: Last remaining bytes are usually smaller than // req.Buffer size. Use that as the final length.
- length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset)) + length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset)) httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length)) if err != nil { - resCh <- readAtResponse{ + resCh <- readResponse{ Error: err, } return } size, err := io.ReadFull(httpReader, req.Buffer) if err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not all the bytes - // ReadFull returns ErrUnexpectedEOF + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF err = io.EOF } - resCh <- readAtResponse{ + resCh <- readResponse{ Size: int(size), Error: err, } @@ -207,78 +199,148 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O } }() // Return the readerAt backed by routine. - return newObjectReadAtCloser(reqCh, resCh, doneCh, objectStat.Size), objectStat, nil + return newObject(reqCh, resCh, doneCh, objectInfo), nil } -// response message container to reply back for the request. -type readAtResponse struct { +// Read response message container to reply back for the request. +type readResponse struct { Size int Error error } -// request message container to communicate with internal go-routine. -type readAtRequest struct { +// Read request message container to communicate with internal +// go-routine. +type readRequest struct { Buffer []byte Offset int64 // readAt offset. } -// objectReadAtCloser container for io.ReadAtCloser. -type objectReadAtCloser struct { - // mutex. +// Object represents an open object. It implements Read, ReadAt, +// Seeker, Close for a HTTP stream. +type Object struct { + // Mutex. mutex *sync.Mutex // User allocated and defined. - reqCh chan<- readAtRequest - resCh <-chan readAtResponse + reqCh chan<- readRequest + resCh <-chan readResponse doneCh chan<- struct{} - objectSize int64 + currOffset int64 + objectInfo ObjectInfo // Previous error saved for future calls. prevErr error } -// newObjectReadAtCloser implements a io.ReadSeeker for a HTTP stream. -func newObjectReadAtCloser(reqCh chan<- readAtRequest, resCh <-chan readAtResponse, doneCh chan<- struct{}, objectSize int64) *objectReadAtCloser { - return &objectReadAtCloser{ - mutex: new(sync.Mutex), - reqCh: reqCh, - resCh: resCh, - doneCh: doneCh, - objectSize: objectSize, +// Read reads up to len(p) bytes into p. It returns the number of +// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // If current offset has reached Size limit, return EOF. + if o.currOffset >= o.objectInfo.Size { + return 0, io.EOF + } + + // Previous prevErr is which was saved in previous operation. + if o.prevErr != nil { + return 0, o.prevErr + } + + // Send current information over control channel to indicate we + // are ready. + reqMsg := readRequest{} + + // Send the offset and pointer to the buffer over the channel. + reqMsg.Buffer = b + reqMsg.Offset = o.currOffset + + // Send read request over the control channel. + o.reqCh <- reqMsg + + // Get data over the response channel. + dataMsg := <-o.resCh + + // Bytes read. + bytesRead := int64(dataMsg.Size) + + // Update current offset. 
+ o.currOffset += bytesRead + + if dataMsg.Error == nil { + // If currOffset read is equal to objectSize + // We have reached end of file, we return io.EOF. + if o.currOffset >= o.objectInfo.Size { + return dataMsg.Size, io.EOF + } + return dataMsg.Size, nil + } + + // Save any error. + o.prevErr = dataMsg.Error + return dataMsg.Size, dataMsg.Error } -// ReadAt reads len(b) bytes from the File starting at byte offset off. -// It returns the number of bytes read and the error, if any. -// ReadAt always returns a non-nil error when n < len(b). -// At end of file, that error is io.EOF. -func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) { +// Stat returns the ObjectInfo structure describing object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } // Locking. - r.mutex.Lock() - defer r.mutex.Unlock() + o.mutex.Lock() + defer o.mutex.Unlock() - // if offset is negative and offset is greater than or equal to object size we return EOF. - if offset < 0 || offset >= r.objectSize { + if o.prevErr != nil { + return ObjectInfo{}, o.prevErr + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // If offset is negative and offset is greater than or equal to + // object size we return EOF. + if offset < 0 || offset >= o.objectInfo.Size { return 0, io.EOF } // prevErr is which was saved in previous operation. - if r.prevErr != nil { - return 0, r.prevErr + if o.prevErr != nil { + return 0, o.prevErr } - // Send current information over control channel to indicate we are ready. - reqMsg := readAtRequest{} + // Send current information over control channel to indicate we + // are ready. + reqMsg := readRequest{} - // Send the current offset and bytes requested. + // Send the offset and pointer to the buffer over the channel. reqMsg.Buffer = b reqMsg.Offset = offset // Send read request over the control channel. - r.reqCh <- reqMsg + o.reqCh <- reqMsg // Get data over the response channel. - dataMsg := <-r.resCh + dataMsg := <-o.resCh // Bytes read. bytesRead := int64(dataMsg.Size) @@ -286,38 +348,109 @@ func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) { if dataMsg.Error == nil { // If offset+bytes read is equal to objectSize // we have reached end of file, we return io.EOF. - if offset+bytesRead == r.objectSize { + if offset+bytesRead == o.objectInfo.Size { return dataMsg.Size, io.EOF } return dataMsg.Size, nil } // Save any error. - r.prevErr = dataMsg.Error + o.prevErr = dataMsg.Error return dataMsg.Size, dataMsg.Error } -// Closer is the interface that wraps the basic Close method. +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. // -// The behavior of Close after the first call returns error for -// subsequent Close() calls. -func (r *objectReadAtCloser) Close() (err error) { +// Seeking to a negative offset is an error. 
Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. +func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + // Locking. - r.mutex.Lock() - defer r.mutex.Unlock() + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil { + // At EOF seeking is legal, for any other errors we return. + if o.prevErr != io.EOF { + return 0, o.prevErr + } + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, ErrInvalidArgument(fmt.Sprintf("Object: negative position not allowed for %d.", whence)) + } + switch whence { + default: + return 0, ErrInvalidArgument(fmt.Sprintf("Object: invalid whence %d", whence)) + case 0: + if offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, ErrInvalidArgument(fmt.Sprintf("Object: Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset += offset + } + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() // prevErr is which was saved in previous operation. - if r.prevErr != nil { - return r.prevErr + if o.prevErr != nil { + return o.prevErr } // Close successfully. - close(r.doneCh) + close(o.doneCh) // Save this for any subsequent frivolous reads. - errMsg := "objectReadAtCloser: is already closed. Bad file descriptor." - r.prevErr = errors.New(errMsg) - return + errMsg := "Object: Is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + return nil +} + +// newObject instantiates a new *minio.Object* +func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + objectInfo: objectInfo, + } } // getObject - retrieve object from Object Storage. @@ -327,13 +460,13 @@ func (r *objectReadAtCloser) Close() (err error) { // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) { +func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) { // Validate input arguments. 
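Because *Object satisfies io.Reader, io.ReaderAt, io.Seeker and io.Closer, it can be handled much like a local file. A minimal usage sketch of the Read/Seek semantics above (hypothetical names, error handling shortened):

obj, err := s3.GetObject("mybucket", "myobject")
if err != nil {
	log.Fatalln(err)
}
defer obj.Close()
// Skip a fixed 8-byte header (whence 0 seeks from the start).
if _, err = obj.Seek(8, 0); err != nil {
	log.Fatalln(err)
}
buf := make([]byte, 512)
n, err := obj.Read(buf)
if err != nil && err != io.EOF {
	log.Fatalln(err)
}
fmt.Printf("read %d payload bytes\n", n)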
if err := isValidBucketName(bucketName); err != nil { - return nil, ObjectStat{}, err + return nil, ObjectInfo{}, err } if err := isValidObjectName(objectName); err != nil { - return nil, ObjectStat{}, err + return nil, ObjectInfo{}, err } customHeader := make(http.Header) @@ -353,16 +486,16 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) ( customHeader: customHeader, }) if err != nil { - return nil, ObjectStat{}, err + return nil, ObjectInfo{}, err } // Execute the request. resp, err := c.do(req) if err != nil { - return nil, ObjectStat{}, err + return nil, ObjectInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + return nil, ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName) } } @@ -374,7 +507,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) ( date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) if err != nil { msg := "Last-Modified time format not recognized. " + reportIssue - return nil, ObjectStat{}, ErrorResponse{ + return nil, ObjectInfo{}, ErrorResponse{ Code: "InternalError", Message: msg, RequestID: resp.Header.Get("x-amz-request-id"), @@ -387,7 +520,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) ( if contentType == "" { contentType = "application/octet-stream" } - var objectStat ObjectStat + var objectStat ObjectInfo objectStat.ETag = md5sum objectStat.Key = objectName objectStat.Size = resp.ContentLength diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go index 8838900a8..7d0ffbaa6 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go @@ -33,7 +33,7 @@ import ( // fmt.Println(message) // } // -func (c Client) ListBuckets() ([]BucketStat, error) { +func (c Client) ListBuckets() ([]BucketInfo, error) { // Instantiate a new request. req, err := c.newRequest("GET", requestMetadata{}) if err != nil { @@ -64,19 +64,25 @@ func (c Client) ListBuckets() ([]BucketStat, error) { // the specified bucket. If recursion is enabled it would list // all subdirectories and all its contents. // -// Your input paramters are just bucketName, objectPrefix and recursive. If you -// enable recursive as 'true' this function will return back all the -// objects in a given bucket name and object prefix. +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. // // api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' // recursive := true -// for message := range api.ListObjects("mytestbucket", "starthere", recursive) { +// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { // fmt.Println(message) // } // -func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat { +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { // Allocate new list objects channel.
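The doneCh parameter documented above exists so a consumer can abandon the listing before it is exhausted; closing the channel stops the paging goroutine. A short sketch of bounded listing (bucket name is illustrative):

doneCh := make(chan struct{})
defer close(doneCh)
count := 0
for object := range api.ListObjects("mytestbucket", "", true, doneCh) {
	if object.Err != nil {
		log.Fatalln(object.Err)
	}
	count++
	if count == 100 {
		break // The deferred close(doneCh) tells the goroutine to stop.
	}
}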
- objectStatCh := make(chan ObjectStat, 1000) + objectStatCh := make(chan ObjectInfo, 1000) // Default listing is delimited at "/" delimiter := "/" if recursive { @@ -86,7 +92,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Validate bucket name. if err := isValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectStat{ + objectStatCh <- ObjectInfo{ Err: err, } return objectStatCh @@ -94,14 +100,14 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Validate incoming object prefix. if err := isValidObjectPrefix(objectPrefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectStat{ + objectStatCh <- ObjectInfo{ Err: err, } return objectStatCh } // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectStat) { + go func(objectStatCh chan<- ObjectInfo) { defer close(objectStatCh) // Save marker for next request. var marker string @@ -109,7 +115,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Get list of objects a maximum of 1000 per request. result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) if err != nil { - objectStatCh <- ObjectStat{ + objectStatCh <- ObjectInfo{ Err: err, } return @@ -131,7 +137,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Send all common prefixes if any. // NOTE: prefixes are only present if the request is delimited. for _, obj := range result.CommonPrefixes { - object := ObjectStat{} + object := ObjectInfo{} object.Key = obj.Prefix object.Size = 0 select { @@ -181,11 +187,22 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit // using them in http request. urlValues := make(url.Values) // Set object prefix. - urlValues.Set("prefix", urlEncodePath(objectPrefix)) + if objectPrefix != "" { + urlValues.Set("prefix", urlEncodePath(objectPrefix)) + } // Set object marker. - urlValues.Set("marker", urlEncodePath(objectMarker)) + if objectMarker != "" { + urlValues.Set("marker", urlEncodePath(objectMarker)) + } // Set delimiter. - urlValues.Set("delimiter", delimiter) + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } // Set max keys. urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) @@ -223,26 +240,31 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit // objectPrefix from the specified bucket. If recursion is enabled // it would list all subdirectories and all its contents. // -// Your input paramters are just bucketName, objectPrefix and recursive. +// Your input paramters are just bucketName, objectPrefix, recursive +// and a done channel to proactively close the internal go routine. // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // // api := client.New(....) +// // Create a done channel. 
+// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' // recursive := true // for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) { // fmt.Println(message) // } // -func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat { +func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { // Turn on size aggregation of individual parts. isAggregateSize := true return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) } // listIncompleteUploads lists all incomplete uploads. -func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat { +func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { // Allocate channel for multipart uploads. - objectMultipartStatCh := make(chan ObjectMultipartStat, 1000) + objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000) // Delimiter is set to "/" by default. delimiter := "/" if recursive { @@ -252,7 +274,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // Validate bucket name. if err := isValidBucketName(bucketName); err != nil { defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartStat{ + objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } return objectMultipartStatCh @@ -260,12 +282,12 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // Validate incoming object prefix. if err := isValidObjectPrefix(objectPrefix); err != nil { defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartStat{ + objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } return objectMultipartStatCh } - go func(objectMultipartStatCh chan<- ObjectMultipartStat) { + go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { defer close(objectMultipartStatCh) // object and upload ID marker for future requests. var objectMarker string @@ -274,7 +296,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // list all multipart uploads. result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) if err != nil { - objectMultipartStatCh <- ObjectMultipartStat{ + objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } return @@ -289,7 +311,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // Get total multipart size. obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) if err != nil { - objectMultipartStatCh <- ObjectMultipartStat{ + objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } } @@ -305,7 +327,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // Send all common prefixes if any. // NOTE: prefixes are only present if the request is delimited. for _, obj := range result.CommonPrefixes { - object := ObjectMultipartStat{} + object := ObjectMultipartInfo{} object.Key = obj.Prefix object.Size = 0 select { @@ -343,13 +365,26 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, // Set uploads. urlValues.Set("uploads", "") // Set object key marker. 
- urlValues.Set("key-marker", urlEncodePath(keyMarker)) + if keyMarker != "" { + urlValues.Set("key-marker", urlEncodePath(keyMarker)) + } // Set upload id marker. - urlValues.Set("upload-id-marker", uploadIDMarker) + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } // Set prefix marker. - urlValues.Set("prefix", urlEncodePath(prefix)) + if prefix != "" { + urlValues.Set("prefix", urlEncodePath(prefix)) + } // Set delimiter. - urlValues.Set("delimiter", delimiter) + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // maxUploads should be 1000 or less. + if maxUploads == 0 || maxUploads > 1000 { + maxUploads = 1000 + } // Set max-uploads. urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) @@ -445,12 +480,15 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) ( } // listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload +// - lists some or all (up to 1000) parts that have been uploaded +// for a specific multipart upload // -// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. -// request paramters :- +// You can use the request parameters as selection criteria to return +// a subset of the uploads in a bucket, request paramters :- // --------- -// ?part-number-marker - Specifies the part after which listing should begin. +// ?part-number-marker - Specifies the part after which listing should +// begin. +// ?max-parts - Maximum parts to be listed per request. func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) @@ -458,6 +496,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) // Set upload id. urlValues.Set("uploadId", uploadID) + + // maxParts should be 1000 or less. + if maxParts == 0 || maxParts > 1000 { + maxParts = 1000 + } // Set max parts. urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go index d46623631..e1b40e0e3 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go @@ -21,7 +21,7 @@ import ( "time" ) -// PresignedGetObject returns a presigned URL to access an object without credentials. +// PresignedGetObject - Returns a presigned URL to access an object without credentials. // Expires maximum is 7days - ie. 604800 and minimum is 1. func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) { // Input validation. @@ -50,7 +50,7 @@ func (c Client) PresignedGetObject(bucketName, objectName string, expires time.D return req.URL.String(), nil } -// PresignedPutObject returns a presigned URL to upload an object without credentials. +// PresignedPutObject - Returns a presigned URL to upload an object without credentials. // Expires maximum is 7days - ie. 604800 and minimum is 1. func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) { // Input validation. 
@@ -79,7 +79,7 @@ func (c Client) PresignedPutObject(bucketName, objectName string, expires time.D return req.URL.String(), nil } -// PresignedPostPolicy returns POST form data to upload an object at a location. +// PresignedPostPolicy - Returns POST form data to upload an object at a location. func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { // Validate input arguments. if p.expiration.IsZero() { @@ -93,7 +93,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { } bucketName := p.formData["bucket"] - // Fetch the location. + // Fetch the bucket location. location, err := c.getBucketLocation(bucketName) if err != nil { return nil, err @@ -101,6 +101,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { // Keep time. t := time.Now().UTC() + // For signature version '2' handle here. if c.signature.isV2() { policyBase64 := p.base64() p.formData["policy"] = policyBase64 @@ -135,7 +136,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { condition: "$x-amz-credential", value: credential, }) - // get base64 encoded policy. + // Get base64 encoded policy. policyBase64 := p.base64() // Fill in the form data. p.formData["policy"] = policyBase64 diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go index e1880d9f8..07648a24d 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go @@ -97,7 +97,7 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str } // Set get bucket location always as path style. - targetURL := c.endpointURL + targetURL := *c.endpointURL if bucketName != "" { // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support this. @@ -132,7 +132,7 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str // If location is not 'us-east-1' create bucket location config. if location != "us-east-1" && location != "" { - createBucketConfig := new(createBucketConfiguration) + createBucketConfig := createBucketConfiguration{} createBucketConfig.Location = location var createBucketConfigBytes []byte createBucketConfigBytes, err = xml.Marshal(createBucketConfig) diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go similarity index 67% rename from Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go rename to Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go index 00b10aabb..5bc92d3bc 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go @@ -28,7 +28,8 @@ import ( "sort" ) -// getUploadID if already present for object name or initiate a request to fetch a new upload id. +// getUploadID - fetch upload id if already present for an object name +// or initiate a new request to fetch a new upload id. func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { @@ -60,84 +61,16 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (string, return uploadID, nil } -// FPutObject - put object a file. 
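PresignedPostPolicy above turns a PostPolicy into browser-uploadable form fields. A hedged sketch of the caller side, assuming the NewPostPolicy, SetBucket, SetKey and SetExpires helpers from post-policy.go, which this hunk does not show:

policy := minio.NewPostPolicy()
policy.SetBucket("mybucket")
policy.SetKey("myobject")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // valid for 10 days
formData, err := s3.PresignedPostPolicy(policy)
if err != nil {
	log.Fatalln(err)
}
// Each pair becomes one field of the multipart form that a browser
// POSTs directly to the bucket endpoint, alongside the file itself.
for k, v := range formData {
	fmt.Printf("%s=%s\n", k, v)
}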
-func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (int64, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - - // Open the referenced file. - fileData, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return 0, err - } - defer fileData.Close() - - // Save the file stat. - fileStat, err := fileData.Stat() - if err != nil { - return 0, err - } - - // Save the file size. - fileSize := fileStat.Size() - if fileSize > int64(maxMultipartPutObjectSize) { - return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.") - } - - // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. - // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. - if isGoogleEndpoint(c.endpointURL) { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size. - n, err := c.putNoChecksum(bucketName, objectName, fileData, fileSize, contentType) - return n, err - } - - // NOTE: S3 doesn't allow anonymous multipart requests. - if isAmazonEndpoint(c.endpointURL) && c.anonymous { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size. - n, err := c.putAnonymous(bucketName, objectName, fileData, fileSize, contentType) - return n, err - } - - // Small object upload is initiated for uploads for input data size smaller than 5MiB. - if fileSize < minimumPartSize { - return c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType) - } - return c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType) -} - -// computeHash - calculates MD5 and Sha256 for an input read Seeker. +// computeHash - Calculates MD5 and SHA256 for an input read Seeker. func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) { - // MD5 and Sha256 hasher. - var hashMD5, hashSha256 hash.Hash - // MD5 and Sha256 hasher. + // MD5 and SHA256 hasher. + var hashMD5, hashSHA256 hash.Hash + // MD5 and SHA256 hasher. hashMD5 = md5.New() hashWriter := io.MultiWriter(hashMD5) if c.signature.isV4() { - hashSha256 = sha256.New() - hashWriter = io.MultiWriter(hashMD5, hashSha256) + hashSHA256 = sha256.New() + hashWriter = io.MultiWriter(hashMD5, hashSHA256) } size, err = io.Copy(hashWriter, reader) @@ -153,12 +86,13 @@ func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, siz // Finalize md5shum and sha256 sum. md5Sum = hashMD5.Sum(nil) if c.signature.isV4() { - sha256Sum = hashSha256.Sum(nil) + sha256Sum = hashSHA256.Sum(nil) } return md5Sum, sha256Sum, size, nil } -func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File, fileSize int64, contentType string) (int64, error) { +// FPutObject - Create an object in a bucket, with contents from file at filePath. 
+func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -167,27 +101,119 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File return 0, err } - // getUploadID for an object, initiates a new multipart request + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return 0, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return 0, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Check for largest object size allowed. + if fileSize > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(fileSize, bucketName, objectName) + } + + // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. + // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. + if isGoogleEndpoint(c.endpointURL) { + if fileSize > int64(maxSinglePutObjectSize) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), + Key: objectName, + BucketName: bucketName, + } + } + // Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType) + } + + // NOTE: S3 doesn't allow anonymous multipart requests. + if isAmazonEndpoint(c.endpointURL) && c.anonymous { + if fileSize > int64(maxSinglePutObjectSize) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), + Key: objectName, + BucketName: bucketName, + } + } + // Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size. + return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType) + } + + // Small object upload is initiated for uploads for input data size smaller than 5MiB. + if fileSize < minimumPartSize { + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType) + } + // Upload all large objects as multipart. + n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "NotImplemented" { + // If size of file is greater than '5GiB' fail. + if fileSize > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(fileSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType) + } + return n, err + } + return n, nil +} + +// putObjectMultipartFromFile - Creates object from contents of *os.File +// +// NOTE: This function is meant to be used for readers with local +// file as in *os.File. This function resumes by skipping all the +// necessary parts which were already uploaded by verifying them +// against MD5SUM of each individual parts. This function also +// effectively utilizes file system capabilities of reading from +// specific sections and not having to create temporary files. 
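Before the multipart-from-file implementation that follows, a minimal usage sketch of the rewritten FPutObject above (paths and names hypothetical):

n, err := s3.FPutObject("mybucket", "backup.tar.gz", "/tmp/backup.tar.gz", "application/x-gzip")
if err != nil {
	log.Fatalln(err)
}
log.Printf("uploaded %d bytes", n)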
+func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader *os.File, fileSize int64, contentType string) (int64, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Get upload id for an object, initiates a new multipart request // if it cannot find any previously partially uploaded object. uploadID, err := c.getUploadID(bucketName, objectName, contentType) if err != nil { return 0, err } - // total data read and written to server. should be equal to 'size' at the end of the call. + // Total data read and written to server. should be equal to 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var completeMultipartUpload completeMultipartUpload - // Fetch previously upload parts and save the total size. + // Fetch previously upload parts. partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) if err != nil { return 0, err } + // Previous maximum part size var prevMaxPartSize int64 - // Loop through all parts and calculate totalUploadedSize. + // Loop through all parts and fetch prevMaxPartSize. for _, partInfo := range partsInfo { // Choose the maximum part size. if partInfo.Size >= prevMaxPartSize { @@ -197,7 +223,7 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File // Calculate the optimal part size for a given file size. partSize := optimalPartSize(fileSize) - // If prevMaxPartSize is set use that. + // Use prevMaxPartSize if available. if prevMaxPartSize != 0 { partSize = prevMaxPartSize } @@ -205,52 +231,39 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File // Part number always starts with '0'. partNumber := 0 - // Loop through until EOF. + // Upload each part until fileSize. for totalUploadedSize < fileSize { // Increment part number. partNumber++ // Get a section reader on a particular offset. - sectionReader := io.NewSectionReader(fileData, totalUploadedSize, partSize) + sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize) - // Calculates MD5 and Sha256 sum for a section reader. + // Calculates MD5 and SHA256 sum for a section reader. md5Sum, sha256Sum, size, err := c.computeHash(sectionReader) if err != nil { return 0, err } - // Save all the part metadata. - prtData := partData{ - ReadCloser: ioutil.NopCloser(sectionReader), - Size: size, - MD5Sum: md5Sum, - Sha256Sum: sha256Sum, - Number: partNumber, // Part number to be uploaded. - } - - // If part not uploaded proceed to upload. + // Verify if part was not uploaded. if !isPartUploaded(objectPart{ - ETag: hex.EncodeToString(prtData.MD5Sum), - PartNumber: prtData.Number, + ETag: hex.EncodeToString(md5Sum), + PartNumber: partNumber, }, partsInfo) { - // Upload the part. - objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) + // Proceed to upload the part. + objPart, err := c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(sectionReader), partNumber, md5Sum, sha256Sum, size) if err != nil { - prtData.ReadCloser.Close() return totalUploadedSize, err } // Save successfully uploaded part metadata. - partsInfo[prtData.Number] = objPart + partsInfo[partNumber] = objPart } - // Close the read closer for temporary file. - prtData.ReadCloser.Close() - // Save successfully uploaded size. 
- totalUploadedSize += prtData.Size + totalUploadedSize += size } - // if totalUploadedSize is different than the file 'size'. Do not complete the request throw an error. + // Verify if we uploaded all data. if totalUploadedSize != fileSize { return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) } @@ -263,7 +276,7 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) } - // If partNumber is different than total list of parts, error out. + // Verify if partNumber is different than total list of parts. if partNumber != len(completeMultipartUpload.Parts) { return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go new file mode 100644 index 000000000..6cacc9800 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go @@ -0,0 +1,421 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/hex" + "encoding/xml" + "hash" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" +) + +// Verify if reader is *os.File +func isFile(reader io.Reader) (ok bool) { + _, ok = reader.(*os.File) + return +} + +// Verify if reader is *minio.Object +func isObject(reader io.Reader) (ok bool) { + _, ok = reader.(*Object) + return +} + +// Verify if reader is a generic ReaderAt +func isReadAt(reader io.Reader) (ok bool) { + _, ok = reader.(io.ReaderAt) + return +} + +// hashCopyN - Calculates Md5sum and SHA256sum for upto partSize amount of bytes. +func (c Client) hashCopyN(writer io.ReadWriteSeeker, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) { + // MD5 and SHA256 hasher. + var hashMD5, hashSHA256 hash.Hash + // MD5 and SHA256 hasher. + hashMD5 = md5.New() + hashWriter := io.MultiWriter(writer, hashMD5) + if c.signature.isV4() { + hashSHA256 = sha256.New() + hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) + } + + // Copies to input at writer. + size, err = io.CopyN(hashWriter, reader, partSize) + if err != nil { + // If not EOF return error right here. + if err != io.EOF { + return nil, nil, 0, err + } + } + + // Seek back to beginning of input, any error fail right here. + if _, err := writer.Seek(0, 0); err != nil { + return nil, nil, 0, err + } + + // Finalize md5shum and sha256 sum. + md5Sum = hashMD5.Sum(nil) + if c.signature.isV4() { + sha256Sum = hashSHA256.Sum(nil) + } + return md5Sum, sha256Sum, size, err +} + +// Comprehensive put object operation involving multipart resumable uploads. +// +// Following code handles these types of readers. 
+// +// - *os.File +// - *minio.Object +// - Any reader which has a method 'ReadAt()' +// +// If we exhaust all the known types, code proceeds to use stream as +// is where each part is re-downloaded, checksummed and verified +// before upload. +func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) { + if size > 0 && size >= minimumPartSize { + // Verify if reader is *os.File, then use file system functionalities. + if isFile(reader) { + return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType) + } + // Verify if reader is *minio.Object or io.ReaderAt. + // NOTE: Verification of object is kept for a specific purpose + // while it is going to be duck typed similar to io.ReaderAt. + // It is to indicate that *minio.Object implements io.ReaderAt. + // and such a functionality is used in the subsequent code + // path. + if isObject(reader) || isReadAt(reader) { + return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType) + } + } + // For any other data size and reader type we do generic multipart + // approach by staging data in temporary files and uploading them. + return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType) +} + +// putObjectStream uploads files bigger than 5MiB, and also supports +// special case where size is unknown i.e '-1'. +func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // getUploadID for an object, initiates a new multipart request + // if it cannot find any previously partially uploaded object. + uploadID, err := c.getUploadID(bucketName, objectName, contentType) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var completeMultipartUpload completeMultipartUpload + + // Fetch previously upload parts. + partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) + if err != nil { + return 0, err + } + // Previous maximum part size + var prevMaxPartSize int64 + // Loop through all parts and calculate totalUploadedSize. + for _, partInfo := range partsInfo { + // Choose the maximum part size. + if partInfo.Size >= prevMaxPartSize { + prevMaxPartSize = partInfo.Size + } + } + + // Calculate the optimal part size for a given size. + partSize := optimalPartSize(size) + // Use prevMaxPartSize if available. + if prevMaxPartSize != 0 { + partSize = prevMaxPartSize + } + + // Part number always starts with '0'. + partNumber := 0 + + // Upload each part until EOF. + for { + // Increment part number. + partNumber++ + + // Initialize a new temporary file. + tmpFile, err := newTempFile("multiparts$-putobject-stream") + if err != nil { + return 0, err + } + + // Calculates MD5 and SHA256 sum while copying partSize bytes into tmpFile. + md5Sum, sha256Sum, size, rErr := c.hashCopyN(tmpFile, reader, partSize) + if rErr != nil { + if rErr != io.EOF { + return 0, rErr + } + } + + // Verify if part was not uploaded. + if !isPartUploaded(objectPart{ + ETag: hex.EncodeToString(md5Sum), + PartNumber: partNumber, + }, partsInfo) { + // Proceed to upload the part. 
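The reader dispatch at the top of putObjectMultipart above relies on plain Go type assertions, exactly as the isFile/isObject/isReadAt helpers do. A standalone sketch of the same decision, for illustration only (needs the io and os imports):

// pickUploadStrategy mirrors the dispatch above: it names the cheapest
// upload strategy the given reader can support.
func pickUploadStrategy(reader io.Reader) string {
	if _, ok := reader.(*os.File); ok {
		return "multipart from file" // seekable; resumes via section readers
	}
	if _, ok := reader.(io.ReaderAt); ok {
		return "multipart from ReaderAt" // random access without temp files
	}
	return "multipart stream" // stage each part in a temporary file
}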
+ objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, size) + if err != nil { + // Close the temporary file upon any error. + tmpFile.Close() + return 0, err + } + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + } + + // Close the temporary file. + tmpFile.Close() + + // If read error was an EOF, break out of the loop. + if rErr == io.EOF { + break + } + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + // Save successfully uploaded size. + totalUploadedSize += part.Size + } + + // Verify if partNumber is different than total list of parts. + if partNumber != len(completeMultipartUpload.Parts) { + return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) + } + + // Sort all completed parts. + sort.Sort(completedParts(completeMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if contentType == "" { + contentType = "application/octet-stream" + } + + // Set ContentType header. + customHeader := make(http.Header) + customHeader.Set("Content-Type", contentType) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return initiateMultipartUploadResult{}, err + } + + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) { + // Input validation. 
+ if err := isValidBucketName(bucketName); err != nil { + return objectPart{}, err + } + if err := isValidObjectName(objectName); err != nil { + return objectPart{}, err + } + if size > maxPartSize { + return objectPart{}, ErrEntityTooLarge(size, bucketName, objectName) + } + if size <= -1 { + return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, + } + + // Instantiate a request. + req, err := c.newRequest("PUT", reqMetadata) + if err != nil { + return objectPart{}, err + } + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return objectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := objectPart{} + objPart.Size = size + objPart.PartNumber = partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return completeMultipartUploadResult{}, err + } + if err := isValidObjectName(objectName); err != nil { + return completeMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Instantiate all the complete multipart buffer. + completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), + contentLength: int64(completeMultipartUploadBuffer.Len()), + contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), + } + + // Instantiate the request. + req, err := c.newRequest("POST", reqMetadata) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Execute the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return completeMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode completed multipart upload response on success. 
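+	// The response body is a small XML document along these lines
+	// (illustrative; values are placeholders):
+	//
+	//   <CompleteMultipartUploadResult>
+	//     <Location>...</Location>
+	//     <Bucket>mybucket</Bucket>
+	//     <Key>myobject</Key>
+	//     <ETag>...</ETag>
+	//   </CompleteMultipartUploadResult>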
+ completeMultipartUploadResult := completeMultipartUploadResult{} + err = xmlDecoder(resp.Body, &completeMultipartUploadResult) + if err != nil { + return completeMultipartUploadResult, err + } + return completeMultipartUploadResult, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go deleted file mode 100644 index 8c05d8858..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "crypto/md5" - "crypto/sha256" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "sort" -) - -// PutObjectPartial put object partial. -func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - // Input size negative should return error. - if size < 0 { - return 0, ErrInvalidArgument("Input file size cannot be negative.") - } - // Input size bigger than 5TiB should fail. - if size > int64(maxMultipartPutObjectSize) { - return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.") - } - - // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. - // So we fall back to single PUT operation with the maximum limit of 5GiB. - if isGoogleEndpoint(c.endpointURL) { - if size > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", size), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size. - n, err := c.putPartialNoChksum(bucketName, objectName, data, size, contentType) - return n, err - } - - // NOTE: S3 doesn't allow anonymous multipart requests. - if isAmazonEndpoint(c.endpointURL) && c.anonymous { - if size > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size. - n, err := c.putPartialAnonymous(bucketName, objectName, data, size, contentType) - return n, err - } - - // Small file upload is initiated for uploads for input data size smaller than 5MiB. 
- if size < minimumPartSize { - n, err = c.putPartialSmallObject(bucketName, objectName, data, size, contentType) - return n, err - } - n, err = c.putPartialLargeObject(bucketName, objectName, data, size, contentType) - return n, err - -} - -// putNoChecksumPartial special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putPartialNoChksum(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - if size > maxPartSize { - return 0, ErrEntityTooLarge(size, bucketName, objectName) - } - - // Create a new pipe to stage the reads. - reader, writer := io.Pipe() - - // readAtOffset to carry future offsets. - var readAtOffset int64 - - // readAt defaults to reading at 5MiB buffer. - readAtBuffer := make([]byte, 1024*1024*5) - - // Initiate a routine to start writing. - go func() { - for { - readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset) - if rerr != nil { - if rerr != io.EOF { - writer.CloseWithError(rerr) - return - } - } - writeSize, werr := writer.Write(readAtBuffer[:readAtSize]) - if werr != nil { - writer.CloseWithError(werr) - return - } - if readAtSize != writeSize { - writer.CloseWithError(errors.New("Something really bad happened here. " + reportIssue)) - return - } - readAtOffset += int64(writeSize) - if rerr == io.EOF { - writer.Close() - return - } - } - }() - // For anonymous requests, we will not calculate sha256 and md5sum. - putObjData := putObjectData{ - MD5Sum: nil, - Sha256Sum: nil, - ReadCloser: reader, - Size: size, - ContentType: contentType, - } - // Execute put object. - st, err := c.putObject(bucketName, objectName, putObjData) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil -} - -// putAnonymousPartial is a special function for uploading content as anonymous request. -// This special function is necessary since Amazon S3 doesn't allow anonymous multipart uploads. -func (c Client) putPartialAnonymous(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - return c.putPartialNoChksum(bucketName, objectName, data, size, contentType) -} - -// putSmallObjectPartial uploads files smaller than 5MiB. -func (c Client) putPartialSmallObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - - // readAt defaults to reading at 5MiB buffer. - readAtBuffer := make([]byte, size) - readAtSize, err := data.ReadAt(readAtBuffer, 0) - if err != nil { - if err != io.EOF { - return 0, err - } - } - if int64(readAtSize) != size { - return 0, ErrUnexpectedEOF(int64(readAtSize), size, bucketName, objectName) - } - - // Construct a new PUT object metadata. 
- putObjData := putObjectData{ - MD5Sum: sumMD5(readAtBuffer), - Sha256Sum: sum256(readAtBuffer), - ReadCloser: ioutil.NopCloser(bytes.NewReader(readAtBuffer)), - Size: size, - ContentType: contentType, - } - // Single part use case, use putObject directly. - st, err := c.putObject(bucketName, objectName, putObjData) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil -} - -// putPartialLargeObject uploads files bigger than 5MiB. -func (c Client) putPartialLargeObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - - // getUploadID for an object, initiates a new multipart request - // if it cannot find any previously partially uploaded object. - uploadID, err := c.getUploadID(bucketName, objectName, contentType) - if err != nil { - return 0, err - } - - // total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var completeMultipartUpload completeMultipartUpload - - // Fetch previously upload parts and save the total size. - partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - - // Previous maximum part size - var prevMaxPartSize int64 - // previous part number. - var prevPartNumber int - // Loop through all parts and calculate totalUploadedSize. - for _, partInfo := range partsInfo { - totalUploadedSize += partInfo.Size - // Choose the maximum part size. - if partInfo.Size >= prevMaxPartSize { - prevMaxPartSize = partInfo.Size - } - // Save previous part number. - prevPartNumber = partInfo.PartNumber - } - - // Calculate the optimal part size for a given file size. - partSize := optimalPartSize(size) - // If prevMaxPartSize is set use that. - if prevMaxPartSize != 0 { - partSize = prevMaxPartSize - } - - // MD5 and Sha256 hasher. - var hashMD5, hashSha256 hash.Hash - - // Part number always starts with prevPartNumber + 1. i.e The next part number. - partNumber := prevPartNumber + 1 - - // Loop through until EOF. - for totalUploadedSize < size { - // Initialize a new temporary file. - tmpFile, err := newTempFile("multiparts$-putobject-partial") - if err != nil { - return 0, err - } - - // Create a hash multiwriter. - hashMD5 = md5.New() - hashWriter := io.MultiWriter(hashMD5) - if c.signature.isV4() { - hashSha256 = sha256.New() - hashWriter = io.MultiWriter(hashMD5, hashSha256) - } - writer := io.MultiWriter(tmpFile, hashWriter) - - // totalUploadedSize is the current readAtOffset. - readAtOffset := totalUploadedSize - - // Read until partSize. - var totalReadPartSize int64 - - // readAt defaults to reading at 5MiB buffer. - readAtBuffer := make([]byte, optimalReadAtBufferSize) - - // Loop through until partSize. - for totalReadPartSize < partSize { - readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset) - if rerr != nil { - if rerr != io.EOF { - return 0, rerr - } - } - writeSize, werr := writer.Write(readAtBuffer[:readAtSize]) - if werr != nil { - return 0, werr - } - if readAtSize != writeSize { - return 0, errors.New("Something really bad happened here. 
" + reportIssue) - } - readAtOffset += int64(writeSize) - totalReadPartSize += int64(writeSize) - if rerr == io.EOF { - break - } - } - - // Seek back to beginning of the temporary file. - if _, err := tmpFile.Seek(0, 0); err != nil { - return 0, err - } - - // Save all the part metadata. - prtData := partData{ - ReadCloser: tmpFile, - MD5Sum: hashMD5.Sum(nil), - Size: totalReadPartSize, - } - - // Signature version '4'. - if c.signature.isV4() { - prtData.Sha256Sum = hashSha256.Sum(nil) - } - - // Current part number to be uploaded. - prtData.Number = partNumber - - // execute upload part. - objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) - if err != nil { - // Close the read closer. - prtData.ReadCloser.Close() - return totalUploadedSize, err - } - - // Save successfully uploaded size. - totalUploadedSize += prtData.Size - - // Save successfully uploaded part metadata. - partsInfo[prtData.Number] = objPart - - // Move to next part. - partNumber++ - } - - // If size is greater than zero verify totalUploaded. - // if totalUploaded is different than the input 'size', do not complete the request throw an error. - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Loop over uploaded parts to save them in a Parts array before completing the multipart request. - for _, part := range partsInfo { - var complPart completePart - complPart.ETag = part.ETag - complPart.PartNumber = part.PartNumber - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) - } - - // Sort all completed parts. - sort.Sort(completedParts(completeMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go new file mode 100644 index 000000000..6d1b0e1fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go @@ -0,0 +1,196 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/md5" + "crypto/sha256" + "errors" + "hash" + "io" + "sort" +) + +// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader +// of type which implements io.ReaderAt interface (ReadAt method). +// +// NOTE: This function is meant to be used for all readers which +// implement io.ReaderAt which allows us for resuming multipart +// uploads but reading at an offset, which would avoid re-read the +// data which was already uploaded. 
Internally this function uses
+// temporary files for staging all the data; these temporary files
+// are cleaned up automatically when the caller, i.e. the http
+// client, closes the stream after uploading all the contents
+// successfully.
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string) (n int64, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Get upload id for an object, initiates a new multipart request
+	// if it cannot find any previously partially uploaded object.
+	uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+	if err != nil {
+		return 0, err
+	}
+
+	// Total data read and written to server; should be equal to
+	// 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var completeMultipartUpload completeMultipartUpload
+
+	// Fetch previously uploaded parts.
+	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return 0, err
+	}
+
+	// Previous maximum part size.
+	var prevMaxPartSize int64
+	// Previous part number.
+	var prevPartNumber int
+	// Loop through all parts and calculate totalUploadedSize.
+	for _, partInfo := range partsInfo {
+		totalUploadedSize += partInfo.Size
+		// Choose the maximum part size.
+		if partInfo.Size >= prevMaxPartSize {
+			prevMaxPartSize = partInfo.Size
+		}
+		// Save previous part number.
+		prevPartNumber = partInfo.PartNumber
+	}
+
+	// Calculate the optimal part size for a given file size.
+	partSize := optimalPartSize(size)
+	// If prevMaxPartSize is set use that.
+	if prevMaxPartSize != 0 {
+		partSize = prevMaxPartSize
+	}
+
+	// MD5 and SHA256 hasher.
+	var hashMD5, hashSHA256 hash.Hash
+
+	// Part number always starts with prevPartNumber + 1, i.e. the next part number.
+	partNumber := prevPartNumber + 1
+
+	// Upload each part until totalUploadedSize reaches input reader size.
+	for totalUploadedSize < size {
+		// Initialize a new temporary file.
+		tmpFile, err := newTempFile("multiparts$-putobject-partial")
+		if err != nil {
+			return 0, err
+		}
+
+		// Create a hash multiwriter.
+		hashMD5 = md5.New()
+		hashWriter := io.MultiWriter(hashMD5)
+		if c.signature.isV4() {
+			hashSHA256 = sha256.New()
+			hashWriter = io.MultiWriter(hashMD5, hashSHA256)
+		}
+		writer := io.MultiWriter(tmpFile, hashWriter)
+
+		// Choose totalUploadedSize as the current readAtOffset.
+		readAtOffset := totalUploadedSize
+
+		// Read until partSize.
+		var totalReadPartSize int64
+
+		// ReadAt defaults to reading at 5MiB buffer.
+		readAtBuffer := make([]byte, optimalReadAtBufferSize)
+
+		// Following block reads data at an offset from the input
+		// reader and copies it into a local temporary file.
+		// Temporary file data is limited to the partSize.
+		for totalReadPartSize < partSize {
+			readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
+			if rerr != nil {
+				if rerr != io.EOF {
+					return 0, rerr
+				}
+			}
+			writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
+			if werr != nil {
+				return 0, werr
+			}
+			if readAtSize != writeSize {
+				return 0, errors.New("Something really bad happened here. " + reportIssue)
+			}
+			readAtOffset += int64(writeSize)
+			totalReadPartSize += int64(writeSize)
+			if rerr == io.EOF {
+				break
+			}
+		}
+
+		// Seek back to beginning of the temporary file.
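+		// (The part body was hashed while it was being written into
+		// tmpFile, which leaves the file offset at the end; the
+		// upload below re-reads the staged part from the start.)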
+ if _, err := tmpFile.Seek(0, 0); err != nil { + return 0, err + } + + var md5Sum, sha256Sum []byte + md5Sum = hashMD5.Sum(nil) + // Signature version '4'. + if c.signature.isV4() { + sha256Sum = hashSHA256.Sum(nil) + } + + // Proceed to upload the part. + objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, totalReadPartSize) + if err != nil { + // Close the read closer. + tmpFile.Close() + return totalUploadedSize, err + } + + // Save successfully uploaded size. + totalUploadedSize += totalReadPartSize + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Move to next part. + partNumber++ + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Loop over uploaded parts to save them in a Parts array before completing the multipart request. + for _, part := range partsInfo { + var complPart completePart + complPart.ETag = part.ETag + complPart.PartNumber = part.PartNumber + completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + } + + // Sort all completed parts. + sort.Sort(completedParts(completeMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go index 563856bae..02f27642f 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go @@ -18,21 +18,43 @@ package minio import ( "bytes" - "crypto/md5" - "crypto/sha256" - "encoding/hex" - "encoding/xml" - "fmt" - "hash" "io" "io/ioutil" "net/http" - "net/url" - "sort" - "strconv" + "os" "strings" ) +// getReaderSize gets the size of the underlying reader, if possible. +func getReaderSize(reader io.Reader) (size int64, err error) { + size = -1 + if reader != nil { + switch v := reader.(type) { + case *bytes.Buffer: + size = int64(v.Len()) + case *bytes.Reader: + size = int64(v.Len()) + case *strings.Reader: + size = int64(v.Len()) + case *os.File: + var st os.FileInfo + st, err = v.Stat() + if err != nil { + return 0, err + } + size = st.Size() + case *Object: + var st ObjectInfo + st, err = v.Stat() + if err != nil { + return 0, err + } + size = st.Size + } + } + return size, nil +} + // completedParts is a collection of parts sortable by their part numbers. // used for sorting the uploaded parts before completing the multipart request. type completedParts []completePart @@ -54,7 +76,7 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // So we fall back to single PUT operation with the maximum limit of 5GiB. // // NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. -func (c Client) PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { // Input validation. 
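+	// NOTE: the PutObject signature changed; callers no longer pass
+	// an explicit size. It is derived from the reader by
+	// getReaderSize above, with -1 marking a stream of unknown
+	// length.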
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -63,6 +85,17 @@ func (c Client) PutObject(bucketName, objectName string, data io.Reader, size in
 		return 0, err
 	}
 
+	// Get the size of the underlying reader, if possible.
+	size, err := getReaderSize(reader)
+	if err != nil {
+		return 0, err
+	}
+
+	// Check for largest object size allowed.
+	if size > int64(maxMultipartPutObjectSize) {
+		return 0, ErrEntityTooLarge(size, bucketName, objectName)
+	}
+
 	// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
 	// So we fall back to single PUT operation with the maximum limit of 5GiB.
 	if isGoogleEndpoint(c.endpointURL) {
@@ -74,35 +107,56 @@ func (c Client) PutObject(bucketName, objectName string, data io.Reader, size in
 				BucketName: bucketName,
 			}
 		}
+		if size > maxSinglePutObjectSize {
+			return 0, ErrEntityTooLarge(size, bucketName, objectName)
+		}
 		// Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size.
-		return c.putNoChecksum(bucketName, objectName, data, size, contentType)
+		return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
 	}
 
 	// NOTE: S3 doesn't allow anonymous multipart requests.
 	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
-		if size <= -1 || size > int64(maxSinglePutObjectSize) {
+		if size <= -1 {
 			return 0, ErrorResponse{
 				Code:       "NotImplemented",
-				Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
+				Message:    "Content-Length cannot be negative for anonymous requests.",
 				Key:        objectName,
 				BucketName: bucketName,
 			}
 		}
+		if size > maxSinglePutObjectSize {
+			return 0, ErrEntityTooLarge(size, bucketName, objectName)
+		}
 		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size.
-		return c.putAnonymous(bucketName, objectName, data, size, contentType)
+		return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
 	}
 
-	// Large file upload is initiated for uploads for input data size
-	// if its greater than 5MiB or data size is negative.
-	if size >= minimumPartSize || size < 0 {
-		return c.putLargeObject(bucketName, objectName, data, size, contentType)
+	// Upload objects smaller than 5MiB with a single PUT operation.
+	if size < minimumPartSize && size > 0 {
+		return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
 	}
-	return c.putSmallObject(bucketName, objectName, data, size, contentType)
+	// For all sizes greater than 5MiB do multipart.
+	n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType)
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back to
+		// a single PutObject operation.
+		if errResp.Code == "NotImplemented" {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return 0, ErrEntityTooLarge(size, bucketName, objectName)
+			}
+			// Fall back to uploading as single PutObject operation.
+			return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
+		}
+		return n, err
+	}
+	return n, nil
 }
 
-// putNoChecksum special function used Google Cloud Storage. This special function
+// putObjectNoChecksum is a special function for Google Cloud Storage. This special function
 // is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { +func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -110,19 +164,12 @@ func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, siz if err := isValidObjectName(objectName); err != nil { return 0, err } - if size > maxPartSize { + if size > maxSinglePutObjectSize { return 0, ErrEntityTooLarge(size, bucketName, objectName) } - // For anonymous requests, we will not calculate sha256 and md5sum. - putObjData := putObjectData{ - MD5Sum: nil, - Sha256Sum: nil, - ReadCloser: ioutil.NopCloser(data), - Size: size, - ContentType: contentType, - } + // This function does not calculate sha256 and md5sum for payload. // Execute put object. - st, err := c.putObject(bucketName, objectName, putObjData) + st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType) if err != nil { return 0, err } @@ -132,10 +179,9 @@ func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, siz return size, nil } -// putAnonymous is a special function for uploading content as anonymous request. -// This special function is necessary since Amazon S3 doesn't allow anonymous -// multipart uploads. -func (c Client) putAnonymous(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { +// putObjectSingle is a special function for uploading single put object request. +// This special function is used as a fallback when multipart upload fails. +func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -143,431 +189,96 @@ func (c Client) putAnonymous(bucketName, objectName string, data io.Reader, size if err := isValidObjectName(objectName); err != nil { return 0, err } - return c.putNoChecksum(bucketName, objectName, data, size, contentType) -} - -// putSmallObject uploads files smaller than 5 mega bytes. -func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, bucketName, objectName) } - if err := isValidObjectName(objectName); err != nil { - return 0, err + // If size is a stream, upload upto 5GiB. + if size <= -1 { + size = maxSinglePutObjectSize } - // Read input data fully into buffer. - dataBytes, err := ioutil.ReadAll(data) + // Initialize a new temporary file. + tmpFile, err := newTempFile("single$-putobject-single") if err != nil { return 0, err } - if int64(len(dataBytes)) != size { - return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName) - } - // Construct a new PUT object metadata. - putObjData := putObjectData{ - MD5Sum: sumMD5(dataBytes), - Sha256Sum: sum256(dataBytes), - ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)), - Size: size, - ContentType: contentType, - } - // Single part use case, use putObject directly. 
- st, err := c.putObject(bucketName, objectName, putObjData) + md5Sum, sha256Sum, size, err := c.hashCopyN(tmpFile, reader, size) if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil -} - -// hashCopy - calculates Md5sum and Sha256sum for upto partSize amount of bytes. -func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) { - // MD5 and Sha256 hasher. - var hashMD5, hashSha256 hash.Hash - // MD5 and Sha256 hasher. - hashMD5 = md5.New() - hashWriter := io.MultiWriter(writer, hashMD5) - if c.signature.isV4() { - hashSha256 = sha256.New() - hashWriter = io.MultiWriter(writer, hashMD5, hashSha256) - } - - // Copies to input at writer. - size, err = io.CopyN(hashWriter, data, partSize) - if err != nil { - // If not EOF return error right here. if err != io.EOF { - return nil, nil, 0, err - } - } - - // Seek back to beginning of input, any error fail right here. - if _, err := writer.Seek(0, 0); err != nil { - return nil, nil, 0, err - } - - // Finalize md5shum and sha256 sum. - md5Sum = hashMD5.Sum(nil) - if c.signature.isV4() { - sha256Sum = hashSha256.Sum(nil) - } - return md5Sum, sha256Sum, size, err -} - -// putLargeObject uploads files bigger than 5 mega bytes. -func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return 0, err - } - if err := isValidObjectName(objectName); err != nil { - return 0, err - } - - // getUploadID for an object, initiates a new multipart request - // if it cannot find any previously partially uploaded object. - uploadID, err := c.getUploadID(bucketName, objectName, contentType) - if err != nil { - return 0, err - } - - // total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var completeMultipartUpload completeMultipartUpload - - // Fetch previously upload parts and save the total size. - partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - // Previous maximum part size - var prevMaxPartSize int64 - // Loop through all parts and calculate totalUploadedSize. - for _, partInfo := range partsInfo { - // Choose the maximum part size. - if partInfo.Size >= prevMaxPartSize { - prevMaxPartSize = partInfo.Size - } - } - - // Calculate the optimal part size for a given size. - partSize := optimalPartSize(size) - // If prevMaxPartSize is set use that. - if prevMaxPartSize != 0 { - partSize = prevMaxPartSize - } - - // Part number always starts with '0'. - partNumber := 0 - - // Loop through until EOF. - for { - // Increment part number. - partNumber++ - - // Initialize a new temporary file. - tmpFile, err := newTempFile("multiparts$-putobject") - if err != nil { return 0, err } - - // Calculates MD5 and Sha256 sum while copying partSize bytes into tmpFile. - md5Sum, sha256Sum, size, rErr := c.hashCopy(tmpFile, data, partSize) - if rErr != nil { - if rErr != io.EOF { - return 0, rErr - } - } - - // Save all the part metadata. - prtData := partData{ - ReadCloser: tmpFile, - Size: size, - MD5Sum: md5Sum, - Sha256Sum: sha256Sum, - Number: partNumber, // Current part number to be uploaded. - } - - // If part not uploaded proceed to upload. 
- if !isPartUploaded(objectPart{ - ETag: hex.EncodeToString(prtData.MD5Sum), - PartNumber: partNumber, - }, partsInfo) { - // execute upload part. - objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) - if err != nil { - // Close the read closer. - prtData.ReadCloser.Close() - return 0, err - } - // Save successfully uploaded part metadata. - partsInfo[prtData.Number] = objPart - } - - // Close the read closer. - prtData.ReadCloser.Close() - - // If read error was an EOF, break out of the loop. - if rErr == io.EOF { - break - } } - - // Loop over uploaded parts to save them in a Parts array before completing the multipart request. - for _, part := range partsInfo { - var complPart completePart - complPart.ETag = part.ETag - complPart.PartNumber = part.PartNumber - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) - // Save successfully uploaded size. - totalUploadedSize += part.Size - } - - // If size is greater than zero verify totalUploadedSize. if totalUploadedSize is - // different than the input 'size', do not complete the request throw an error. - if size > 0 { - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - } - - // If partNumber is different than total list of parts, error out. - if partNumber != len(completeMultipartUpload.Parts) { - return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) - } - - // Sort all completed parts. - sort.Sort(completedParts(completeMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + // Execute put object. + st, err := c.putObjectDo(bucketName, objectName, tmpFile, md5Sum, sha256Sum, size, contentType) if err != nil { - return totalUploadedSize, err + return 0, err } - - // Return final size. - return totalUploadedSize, nil + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil } -// putObject - add an object to a bucket. +// putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObject(bucketName, objectName string, putObjData putObjectData) (ObjectStat, error) { +func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { - return ObjectStat{}, err + return ObjectInfo{}, err } if err := isValidObjectName(objectName); err != nil { - return ObjectStat{}, err + return ObjectInfo{}, err } - if strings.TrimSpace(putObjData.ContentType) == "" { - putObjData.ContentType = "application/octet-stream" + if size <= -1 { + return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName) + } + + if size > maxSinglePutObjectSize { + return ObjectInfo{}, ErrEntityTooLarge(size, bucketName, objectName) + } + + if strings.TrimSpace(contentType) == "" { + contentType = "application/octet-stream" } // Set headers. customHeader := make(http.Header) - customHeader.Set("Content-Type", putObjData.ContentType) + customHeader.Set("Content-Type", contentType) // Populate request metadata. 
reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, customHeader: customHeader, - contentBody: putObjData.ReadCloser, - contentLength: putObjData.Size, - contentSha256Bytes: putObjData.Sha256Sum, - contentMD5Bytes: putObjData.MD5Sum, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, } // Initiate new request. req, err := c.newRequest("PUT", reqMetadata) if err != nil { - return ObjectStat{}, err + return ObjectInfo{}, err } // Execute the request. resp, err := c.do(req) defer closeResponse(resp) if err != nil { - return ObjectStat{}, err + return ObjectInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) + return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName) } } - var metadata ObjectStat + var metadata ObjectInfo // Trim off the odd double quotes from ETag in the beginning and end. metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"") // A success here means data was written to server successfully. - metadata.Size = putObjData.Size + metadata.Size = size // Return here. return metadata, nil } - -// initiateMultipartUpload initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return initiateMultipartUploadResult{}, err - } - if err := isValidObjectName(objectName); err != nil { - return initiateMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploads", "") - - if contentType == "" { - contentType = "application/octet-stream" - } - - // set ContentType header. - customHeader := make(http.Header) - customHeader.Set("Content-Type", contentType) - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - } - - // Instantiate the request. - req, err := c.newRequest("POST", reqMetadata) - if err != nil { - return initiateMultipartUploadResult{}, err - } - // Execute the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode xml initiate multipart. - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -// uploadPart uploads a part in a multipart upload. -func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partData) (objectPart, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return objectPart{}, err - } - if err := isValidObjectName(objectName); err != nil { - return objectPart{}, err - } - - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number. - urlValues.Set("partNumber", strconv.Itoa(uploadingPart.Number)) - // Set upload id. 
- urlValues.Set("uploadId", uploadID) - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: uploadingPart.ReadCloser, - contentLength: uploadingPart.Size, - contentSha256Bytes: uploadingPart.Sha256Sum, - contentMD5Bytes: uploadingPart.MD5Sum, - } - - // Instantiate a request. - req, err := c.newRequest("PUT", reqMetadata) - if err != nil { - return objectPart{}, err - } - // Execute the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return objectPart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName) - } - } - // Once successfully uploaded, return completed part. - objPart := objectPart{} - objPart.Size = uploadingPart.Size - objPart.PartNumber = uploadingPart.Number - // Trim off the odd double quotes from ETag in the beginning and end. - objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") - return objPart, nil -} - -// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return completeMultipartUploadResult{}, err - } - if err := isValidObjectName(objectName); err != nil { - return completeMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - - // Marshal complete multipart body. - completeMultipartUploadBytes, err := xml.Marshal(complete) - if err != nil { - return completeMultipartUploadResult{}, err - } - - // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), - contentLength: int64(completeMultipartUploadBuffer.Len()), - contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), - } - - // Instantiate the request. - req, err := c.newRequest("POST", reqMetadata) - if err != nil { - return completeMultipartUploadResult{}, err - } - - // Execute the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return completeMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName) - } - } - // If successful response, decode the body. - completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(resp.Body, &completeMultipartUploadResult) - if err != nil { - return completeMultipartUploadResult, err - } - return completeMultipartUploadResult, nil -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go index 0e1abc2e3..1ac420782 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go @@ -26,15 +26,18 @@ import ( // All objects (including all object versions and delete markers). // in the bucket must be deleted before successfully attempting this request. 
func (c Client) RemoveBucket(bucketName string) error { + // Input validation. if err := isValidBucketName(bucketName); err != nil { return err } + // Instantiate a new request. req, err := c.newRequest("DELETE", requestMetadata{ bucketName: bucketName, }) if err != nil { return err } + // Initiate the request. resp, err := c.do(req) defer closeResponse(resp) if err != nil { @@ -54,12 +57,14 @@ func (c Client) RemoveBucket(bucketName string) error { // RemoveObject remove an object from a bucket. func (c Client) RemoveObject(bucketName, objectName string) error { + // Input validation. if err := isValidBucketName(bucketName); err != nil { return err } if err := isValidObjectName(objectName); err != nil { return err } + // Instantiate the request. req, err := c.newRequest("DELETE", requestMetadata{ bucketName: bucketName, objectName: objectName, @@ -67,6 +72,7 @@ func (c Client) RemoveObject(bucketName, objectName string) error { if err != nil { return err } + // Initiate the request. resp, err := c.do(req) defer closeResponse(resp) if err != nil { @@ -81,42 +87,32 @@ func (c Client) RemoveObject(bucketName, objectName string) error { // RemoveIncompleteUpload aborts an partially uploaded object. // Requires explicit authentication, no anonymous requests are allowed for multipart API. func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { - // Validate input arguments. + // Input validation. if err := isValidBucketName(bucketName); err != nil { return err } if err := isValidObjectName(objectName); err != nil { return err } - errorCh := make(chan error) - go func(errorCh chan<- error) { - defer close(errorCh) - // Find multipart upload id of the object. - uploadID, err := c.findUploadID(bucketName, objectName) - if err != nil { - errorCh <- err - return - } - if uploadID != "" { - // If uploadID is not an empty string, initiate the request. - err := c.abortMultipartUpload(bucketName, objectName, uploadID) - if err != nil { - errorCh <- err - return - } - return - } - }(errorCh) - err, ok := <-errorCh - if ok && err != nil { + // Find multipart upload id of the object to be aborted. + uploadID, err := c.findUploadID(bucketName, objectName) + if err != nil { return err } + if uploadID != "" { + // Upload id found, abort the incomplete multipart upload. + err := c.abortMultipartUpload(bucketName, objectName, uploadID) + if err != nil { + return err + } + } return nil } -// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted. +// abortMultipartUpload aborts a multipart upload for the given +// uploadID, all previously uploaded parts are deleted. func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { - // Validate input arguments. + // Input validation. if err := isValidBucketName(bucketName); err != nil { return err } @@ -138,7 +134,7 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er return err } - // execute the request. + // Initiate the request. resp, err := c.do(req) defer closeResponse(resp) if err != nil { @@ -146,11 +142,12 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er } if resp != nil { if resp.StatusCode != http.StatusNoContent { - // Abort has no response body, handle it. + // Abort has no response body, handle it for any errors. var errorResponse ErrorResponse switch resp.StatusCode { case http.StatusNotFound: - // This is needed specifically for Abort and it cannot be converged. 
+				// This is needed specifically for abort and it cannot
+				// be collapsed into the default case.
 				errorResponse = ErrorResponse{
 					Code:       "NoSuchUpload",
 					Message:    "The specified multipart upload does not exist.",
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
index 61931b0b3..de562e475 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
@@ -21,31 +21,34 @@ import (
 	"time"
 )
 
-// listAllMyBucketsResult container for listBuckets response
+// listAllMyBucketsResult container for listBuckets response.
 type listAllMyBucketsResult struct {
 	// Container for one or more buckets.
 	Buckets struct {
-		Bucket []BucketStat
+		Bucket []BucketInfo
 	}
 	Owner owner
 }
 
-// owner container for bucket owner information
+// owner container for bucket owner information.
 type owner struct {
 	DisplayName string
 	ID          string
 }
 
-// commonPrefix container for prefix response
+// commonPrefix container for prefix response.
 type commonPrefix struct {
 	Prefix string
 }
 
-// listBucketResult container for listObjects response
+// listBucketResult container for listObjects response.
 type listBucketResult struct {
-	CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you have specified a delimiter
-	Contents       []ObjectStat   // Metadata about each object returned
-	Delimiter      string
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []commonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
 
 	// Encoding type used to encode object keys in the response.
 	EncodingType string
@@ -57,13 +60,15 @@ type listBucketResult struct {
 	MaxKeys int64
 	Name    string
 
-	// When response is truncated (the IsTruncated element value in the response
-	// is true), you can use the key name in this field as marker in the subsequent
-	// request to get next set of objects. Object storage lists objects in alphabetical
-	// order Note: This element is returned only if you have delimiter request parameter
-	// specified. If response does not include the NextMaker and it is truncated,
-	// you can use the value of the last Key in the response as the marker in the
-	// subsequent request to get the next set of object keys.
+	// When response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as marker in the subsequent request to get next set of objects.
+	// Object storage lists objects in alphabetical order. Note: This
+	// element is returned only if you have delimiter request
+	// parameter specified. If response does not include the NextMarker
+	// and it is truncated, you can use the value of the last Key in
+	// the response as the marker in the subsequent request to get the
+	// next set of object keys.
 	NextMarker string
 	Prefix     string
 }
@@ -78,19 +83,20 @@ type listMultipartUploadsResult struct {
 	EncodingType       string
 	MaxUploads         int64
 	IsTruncated        bool
-	Uploads            []ObjectMultipartStat `xml:"Upload"`
+	Uploads            []ObjectMultipartInfo `xml:"Upload"`
 	Prefix             string
 	Delimiter          string
-	CommonPrefixes     []commonPrefix // A response can contain CommonPrefixes only if you specify a delimiter
+	// A response can contain CommonPrefixes only if you specify a delimiter.
+ CommonPrefixes []commonPrefix } -// initiator container for who initiated multipart upload +// initiator container for who initiated multipart upload. type initiator struct { ID string DisplayName string } -// objectPart container for particular part of an object +// objectPart container for particular part of an object. type objectPart struct { // Part number identifies the part. PartNumber int @@ -98,7 +104,8 @@ type objectPart struct { // Date and time the part was uploaded. LastModified time.Time - // Entity tag returned when the part was uploaded, usually md5sum of the part + // Entity tag returned when the part was uploaded, usually md5sum + // of the part. ETag string // Size of the uploaded part data. @@ -126,14 +133,16 @@ type listObjectPartsResult struct { EncodingType string } -// initiateMultipartUploadResult container for InitiateMultiPartUpload response. +// initiateMultipartUploadResult container for InitiateMultiPartUpload +// response. type initiateMultipartUploadResult struct { Bucket string Key string UploadID string `xml:"UploadId"` } -// completeMultipartUploadResult container for completed multipart upload response. +// completeMultipartUploadResult container for completed multipart +// upload response. type completeMultipartUploadResult struct { Location string Bucket string @@ -141,7 +150,8 @@ type completeMultipartUploadResult struct { ETag string } -// completePart sub container lists individual part numbers and their md5sum, part of completeMultipartUpload. +// completePart sub container lists individual part numbers and their +// md5sum, part of completeMultipartUpload. type completePart struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` @@ -150,13 +160,13 @@ type completePart struct { ETag string } -// completeMultipartUpload container for completing multipart upload +// completeMultipartUpload container for completing multipart upload. type completeMultipartUpload struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` Parts []completePart `xml:"Part"` } -// createBucketConfiguration container for bucket configuration +// createBucketConfiguration container for bucket configuration. type createBucketConfiguration struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` Location string `xml:"LocationConstraint"` @@ -164,7 +174,8 @@ type createBucketConfiguration struct { // grant container for the grantee and his or her permissions. type grant struct { - // grantee container for DisplayName and ID of the person being granted permissions. + // grantee container for DisplayName and ID of the person being + // granted permissions. Grantee struct { ID string DisplayName string @@ -175,7 +186,8 @@ type grant struct { Permission string } -// accessControlPolicy contains the elements providing ACL permissions for a bucket. +// accessControlPolicy contains the elements providing ACL permissions +// for a bucket. type accessControlPolicy struct { // accessControlList container for ACL information. AccessControlList struct { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go index 8a29bccd5..826782033 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go @@ -25,15 +25,18 @@ import ( // BucketExists verify if bucket exists and you have permission to access it. 
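+// A nil error means the bucket exists and is accessible with the
+// configured credentials. Typical usage (sketch; "mybucket" is a
+// placeholder):
+//
+//	if err := clnt.BucketExists("mybucket"); err != nil {
+//		log.Fatalln(err)
+//	}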
 func (c Client) BucketExists(bucketName string) error {
+	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return err
 	}
+	// Instantiate a new request.
 	req, err := c.newRequest("HEAD", requestMetadata{
 		bucketName: bucketName,
 	})
 	if err != nil {
 		return err
 	}
+	// Initiate the request.
 	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
 		return err
 	}
@@ -48,12 +51,13 @@ func (c Client) BucketExists(bucketName string) error {
 }
 
 // StatObject verifies if object exists and you have permission to access.
-func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
+func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
+	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
-		return ObjectStat{}, err
+		return ObjectInfo{}, err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return ObjectStat{}, err
+		return ObjectInfo{}, err
 	}
 	// Instantiate a new request.
 	req, err := c.newRequest("HEAD", requestMetadata{
@@ -61,16 +65,17 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
 		objectName: objectName,
 	})
 	if err != nil {
-		return ObjectStat{}, err
+		return ObjectInfo{}, err
 	}
+	// Initiate the request.
 	resp, err := c.do(req)
 	defer closeResponse(resp)
 	if err != nil {
-		return ObjectStat{}, err
+		return ObjectInfo{}, err
 	}
 	if resp != nil {
 		if resp.StatusCode != http.StatusOK {
-			return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+			return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
 		}
 	}
 
@@ -81,7 +86,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
 	// Parse content length.
 	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
 	if err != nil {
-		return ObjectStat{}, ErrorResponse{
+		return ObjectInfo{}, ErrorResponse{
 			Code:       "InternalError",
 			Message:    "Content-Length is invalid. " + reportIssue,
 			BucketName: bucketName,
@@ -91,9 +96,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
 			AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 		}
 	}
+	// Parse Last-Modified as http time format.
 	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 	if err != nil {
-		return ObjectStat{}, ErrorResponse{
+		return ObjectInfo{}, ErrorResponse{
 			Code:       "InternalError",
 			Message:    "Last-Modified time format is invalid. " + reportIssue,
 			BucketName: bucketName,
@@ -103,12 +109,13 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
 			AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 		}
 	}
+	// Fetch content type if any present.
 	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
 	if contentType == "" {
 		contentType = "application/octet-stream"
 	}
 	// Save object metadata info.
-	var objectStat ObjectStat
+	var objectStat ObjectInfo
 	objectStat.ETag = md5sum
 	objectStat.Key = objectName
 	objectStat.Size = size
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
index f74bf2036..9b7f3c077 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
@@ -25,6 +25,7 @@ import (
 	"net/http/httputil"
 	"net/url"
 	"os"
+	"regexp"
 	"runtime"
 	"strings"
 	"time"
@@ -33,10 +34,15 @@ import (
 // Client implements Amazon S3 compatible methods.
 type Client struct {
 	/// Standard options.
-	accessKeyID     string        // AccessKeyID required for authorized requests.
- secretAccessKey string // SecretAccessKey required for authorized requests. - signature SignatureType // Choose a signature type if necessary. - anonymous bool // Set to 'true' if Client has no access and secret keys. + + // AccessKeyID required for authorized requests. + accessKeyID string + // SecretAccessKey required for authorized requests. + secretAccessKey string + // Choose a signature type if necessary. + signature SignatureType + // Set to 'true' if Client has no access and secret keys. + anonymous bool // User supplied. appInfo struct { @@ -69,7 +75,8 @@ const ( libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion ) -// NewV2 - instantiate minio client with Amazon S3 signature version '2' compatiblity. +// NewV2 - instantiate minio client with Amazon S3 signature version +// '2' compatibility. func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) if err != nil { @@ -80,7 +87,8 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) return clnt, nil } -// NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility. +// NewV4 - instantiate minio client with Amazon S3 signature version +// '4' compatibility. func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) if err != nil { @@ -91,13 +99,15 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) return clnt, nil } -// New - instantiate minio client Client, adds automatic verification of signature. +// New - instantiate minio client Client, adds automatic verification +// of signature. func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) if err != nil { return nil, err } - // Google cloud storage should be set to signature V2, force it if not. + // Google cloud storage should be set to signature V2, force it if + // not. if isGoogleEndpoint(clnt.endpointURL) { clnt.signature = SignatureV2 } @@ -136,7 +146,8 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (* // SetAppInfo - add application details to user agent. func (c *Client) SetAppInfo(appName string, appVersion string) { - // if app name and version is not set, we do not a new user agent. + // if app name and version is not set, we do not set a new + // user agent. if appName != "" && appVersion != "" { c.appInfo = struct { appName string @@ -149,12 +160,13 @@ func (c *Client) SetAppInfo(appName string, appVersion string) { // SetCustomTransport - set new custom transport. func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { - // Set this to override default transport ``http.DefaultTransport``. + // Set this to override default transport + // ``http.DefaultTransport``.
// - // This transport is usually needed for debugging OR to add your own - // custom TLS certificates on the client transport, for custom CA's and - // certs which are not part of standard certificate authority follow this - // example :- + // This transport is usually needed for debugging OR to add your + // own custom TLS certificates on the client transport, for custom + // CA's and certs which are not part of standard certificate + // authority follow this example :- // // tr := &http.Transport{ // TLSClientConfig: &tls.Config{RootCAs: pool}, @@ -187,7 +199,8 @@ func (c *Client) TraceOff() { c.isTraceEnabled = false } -// requestMetadata - is container for all the values to make a request. +// requestMetadata - is container for all the values to make a +// request. type requestMetadata struct { // If set newRequest presigns the URL. presignURL bool @@ -202,10 +215,41 @@ type requestMetadata struct { // Generated by our internal code. contentBody io.ReadCloser contentLength int64 - contentSha256Bytes []byte + contentSHA256Bytes []byte contentMD5Bytes []byte } +// Filter out signature value from Authorization header. +func (c Client) filterSignature(req *http.Request) { + // For anonymous requests return here. + if c.anonymous { + return + } + // Handle if Signature V2. + if c.signature.isV2() { + // Set a temporary redacted auth + req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**") + return + } + + /// Signature V4 authorization header. + + // Save the original auth. + origAuth := req.Header.Get("Authorization") + // Strip out accessKeyID from: + // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request + regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/") + newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") + + // Strip out 256-bit signature from: Signature=<256-bit signature> + regSign := regexp.MustCompile("Signature=([0-9a-f]+)") + newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") + + // Set a temporary redacted auth + req.Header.Set("Authorization", newAuth) + return +} + // dumpHTTP - dump HTTP request and response. func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. @@ -214,6 +258,9 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { return err } + // Filter out Signature field from Authorization header. + c.filterSignature(req) + // Only display request header. reqTrace, err := httputil.DumpRequestOut(req, false) if err != nil { return err } @@ -227,11 +274,22 @@ } // Only display response header. - respTrace, err := httputil.DumpResponse(resp, false) - if err != nil { - return err - } + var respTrace []byte + // For errors we make sure to dump response body as well. + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } // Write response to trace output. _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) if err != nil { @@ -328,11 +386,12 @@ func (c Client) newRequest(method string, metadata requestMetadata) (*http.Reque // Set sha256 sum only for non anonymous credentials. if !c.anonymous { - // set sha256 sum for signature calculation only with signature version '4'.
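The filterSignature helper above rewrites only the Authorization header, so TraceOn output can be shared without leaking keys. A minimal standalone sketch of the same redaction idea, outside the library (the helper name and the sample header value are illustrative, not part of minio-go):

    package main

    import (
    	"fmt"
    	"net/http"
    	"regexp"
    )

    // redactAuth mirrors the filterSignature logic sketched above: it blanks
    // out the access key and the hex signature in a V4 Authorization header
    // before the request is written to a trace log.
    func redactAuth(req *http.Request) {
    	auth := req.Header.Get("Authorization")
    	if auth == "" {
    		return
    	}
    	// Credential=<access-key-id>/<date>/<region>/<service>/aws4_request
    	auth = regexp.MustCompile("Credential=([A-Z0-9]+)/").
    		ReplaceAllString(auth, "Credential=**REDACTED**/")
    	// Signature=<hex-encoded signature>
    	auth = regexp.MustCompile("Signature=([0-9a-f]+)").
    		ReplaceAllString(auth, "Signature=**REDACTED**")
    	req.Header.Set("Authorization", auth)
    }

    func main() {
    	req, _ := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
    	req.Header.Set("Authorization",
    		"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20151106/us-east-1/s3/aws4_request, "+
    			"SignedHeaders=host, Signature=fe5f80f77d5fa3beca038a248ff027")
    	redactAuth(req)
    	// Prints the header with both secrets replaced by **REDACTED**.
    	fmt.Println(req.Header.Get("Authorization"))
    }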
+ // set sha256 sum for signature calculation only with + // signature version '4'. if c.signature.isV4() { req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) - if metadata.contentSha256Bytes != nil { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes)) + if metadata.contentSHA256Bytes != nil { + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes)) } } } @@ -356,6 +415,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (*http.Reque return req, nil } +// setUserAgent - set user agent. func (c Client) setUserAgent(req *http.Request) { req.Header.Set("User-Agent", libraryUserAgent) if c.appInfo.appName != "" && c.appInfo.appVersion != "" { @@ -363,12 +423,15 @@ func (c Client) setUserAgent(req *http.Request) { } } +// makeTargetURL - make a new target URL. func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) { urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/" - // Make URL only if bucketName is available, otherwise use the endpoint URL. + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. if bucketName != "" { // If endpoint supports virtual host style use that always. - // Currently only S3 and Google Cloud Storage would support this. + // Currently only S3 and Google Cloud Storage would support + // this. if isVirtualHostSupported(c.endpointURL) { urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/" if objectName != "" { @@ -403,21 +466,17 @@ type CloudStorageClient interface { SetBucketACL(bucketName string, cannedACL BucketACL) error GetBucketACL(bucketName string) (BucketACL, error) - ListBuckets() ([]BucketStat, error) - ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat - ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat + ListBuckets() ([]BucketInfo, error) + ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo + ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo // Object Read/Write/Stat operations. - GetObject(bucketName, objectName string) (reader io.ReadCloser, stat ObjectStat, err error) - PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) - StatObject(bucketName, objectName string) (ObjectStat, error) + GetObject(bucketName, objectName string) (reader *Object, err error) + PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) + StatObject(bucketName, objectName string) (ObjectInfo, error) RemoveObject(bucketName, objectName string) error RemoveIncompleteUpload(bucketName, objectName string) error - // Object Read/Write for sparse upload. - GetObjectPartial(bucketName, objectName string) (reader ReadAtCloser, stat ObjectStat, err error) - PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) - // File to Object API.
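With the interface rewrite above, GetObject now returns a single seekable *Object (Stat, Seek, ReadAt, Close) in place of the old reader/ObjectStat pair, and PutObject infers the length from the reader instead of taking a size argument. A hedged sketch of the new call pattern as a caller would use it (endpoint, credentials, and names are placeholders):

    package main

    import (
    	"io"
    	"log"

    	"github.com/minio/minio-go"
    )

    func main() {
    	// Placeholder endpoint and credentials.
    	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", false)
    	if err != nil {
    		log.Fatalln(err)
    	}

    	// One return value now bundles reading, seeking and metadata.
    	obj, err := c.GetObject("my-bucketname", "my-objectname")
    	if err != nil {
    		log.Fatalln(err)
    	}
    	defer obj.Close()

    	st, err := obj.Stat()
    	if err != nil {
    		log.Fatalln(err)
    	}

    	// Random access replaces the removed GetObjectPartial.
    	buf := make([]byte, 512)
    	if st.Size >= int64(len(buf)) {
    		if _, err := obj.ReadAt(buf, st.Size-int64(len(buf))); err != nil && err != io.EOF {
    			log.Fatalln(err)
    		}
    	}
    }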
FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) FGetObject(bucketName, objectName, filePath string) error diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go new file mode 100644 index 000000000..51ba285c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go @@ -0,0 +1,751 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio_test + +import ( + "bytes" + crand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "testing" + "time" + + "github.com/minio/minio-go" +) + +// Tests removing partially uploaded objects. +func TestRemovePartiallyUploadedV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping function tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.NewV2( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, err = io.CopyN(writer, crand.Reader, 128*1024) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + i++ + } + writer.CloseWithError(errors.New("Proactively closed to be verified later.")) + }() + + objectName := bucketName + "-resumable" + _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err == nil { + t.Fatal("Error: PutObject should fail.") + } + if err.Error() != "Proactively closed to be verified later." { + t.Fatal("Error:", err) + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable file based put object multipart upload. +func TestResumableFPutObjectV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. 
+ // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + file, err := ioutil.TempFile(os.TempDir(), "resumable") + if err != nil { + t.Fatal("Error:", err) + } + + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + objectName := bucketName + "-resumable" + + n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream") + if err != nil { + t.Fatal("Error:", err) + } + if n != int64(11*1024*1024) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) + } + + // Close the file pro-actively for windows. + file.Close() + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + err = os.Remove(file.Name()) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable put object multipart upload. +func TestResumablePutObjectV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // generate 11MB + buf := make([]byte, 11*1024*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + objectName := bucketName + "-resumable" + reader := bytes.NewReader(buf) + n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderSeeker interface methods. +func TestGetObjectReadSeekFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + n, err = r.Seek(offset, 0) + if err != nil { + t.Fatal("Error:", err, offset) + } + if n != offset { + t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n", + offset, n) + } + n, err = r.Seek(0, 1) + if err != nil { + t.Fatal("Error:", err) + } + if n != offset { + t.Fatalf("Error: number of current seek does not match, want %v, got %v\n", + offset, n) + } + _, err = r.Seek(offset, 2) + if err == nil { + t.Fatal("Error: seek on positive offset for whence '2' should error out") + } + n, err = r.Seek(-offset, 2) + if err != nil { + t.Fatal("Error:", err) + } + if n != 0 { + t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n) + } + var buffer bytes.Buffer + if _, err = io.CopyN(&buffer, r, st.Size); err != nil { + t.Fatal("Error:", err) + } + if !bytes.Equal(buf, buffer.Bytes()) { + t.Fatal("Error: Incorrect read bytes v/s original buffer.") + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderAt interface methods. +func TestGetObjectReadAtFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf2), offset) + } + if m != len(buf2) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2)) + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf3), offset) + } + if m != len(buf3) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3)) + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + t.Fatal("Error:", err, st.Size, len(buf4), offset) + } + if m != len(buf4) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4)) + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + t.Fatal("Error: Incorrect read between two ReadAt from same offset.") + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf5)) + } + } + if m != len(buf5) { + t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5)) + } + if !bytes.Equal(buf, buf5) { + t.Fatal("Error: Incorrect data read in GetObject, compared to what was previously uploaded.") + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + t.Fatal("Error:", err, len(buf6)) + } + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests comprehensive list of all methods. +func TestFunctionalV2(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano())) + file, err := os.Create(fileName) + if err != nil { + t.Fatal("Error:", err) + } + var totalSize int64 + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + n, err := file.Write(buf) + if err != nil { + t.Fatal("Error:", err) + } + totalSize += int64(n) + } + file.Close() + + // Verify if bucket exists and you have access. + err = c.BucketExists(bucketName) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Make the bucket 'public read/write'. + err = c.SetBucketACL(bucketName, "public-read-write") + if err != nil { + t.Fatal("Error:", err) + } + + // Get the previously set acl. + acl, err := c.GetBucketACL(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + // ACL must be 'public read/write'. + if acl != minio.BucketACL("public-read-write") { + t.Fatal("Error:", acl) + } + + // List all buckets. + buckets, err := c.ListBuckets() + if len(buckets) == 0 { + t.Fatal("Error: list buckets cannot be empty", buckets) + } + if err != nil { + t.Fatal("Error:", err) + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + t.Fatal("Error: bucket ", bucketName, "not found") + } + + objectName := bucketName + "unique" + + // Generate data + buf := make([]byte, rand.Intn(1<<19)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") + if err != nil { + t.Fatal("Error: ", err) + } + if n != int64(len(buf)) { + t.Fatal("Error: bad length ", n, len(buf)) + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName+"-nolength") + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + t.Fatal("Error: object " + objectName + " not found.") + } + + incompObjNotFound := true + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + t.Fatal("Error: unexpected dangling incomplete upload found.") + } + + newReader, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.FGetObject(bucketName, objectName, fileName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + + resp, err := http.Get(presignedGetURL) + if err != nil { + t.Fatal("Error: ", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatal("Error: ", resp.Status) + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal("Error: ", err) + } + if !bytes.Equal(newPresignedBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + t.Fatal("Error: ", err) + } + buf = make([]byte, rand.Intn(1<<20)) + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error: ", err) + } + req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf)) + if err != nil { + t.Fatal("Error: ", err) + } + httpClient := &http.Client{} + resp, err = httpClient.Do(req) + if err != nil { + t.Fatal("Error: ", err) + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + t.Fatal("Error: ", err) + } + + if !bytes.Equal(newReadBytes, buf) { + t.Fatal("Error: bytes mismatch.") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-f") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-nolength") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveObject(bucketName, objectName+"-presigned") + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err == nil { + t.Fatal("Error:") + } + if err.Error() != "The specified bucket does not exist" { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName); err != nil { + t.Fatal("Error: ", err) + } + if err = os.Remove(fileName + "-f"); err != nil { + t.Fatal("Error: ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go similarity index 62% rename from Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go rename to Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go index f7bd81097..d452d8484 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go +++ 
b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go @@ -19,6 +19,7 @@ package minio_test import ( "bytes" crand "crypto/rand" + "errors" "io" "io/ioutil" "math/rand" @@ -54,9 +55,10 @@ func randString(n int, src rand.Source) string { return string(b[0:30]) } -func TestResumableFPutObject(t *testing.T) { +// Tests removing partially uploaded objects. +func TestRemovePartiallyUploaded(t *testing.T) { if testing.Short() { - t.Skip("skipping resumable tests with short runs") + t.Skip("skipping function tests for short runs") } // Seed random based on current time. @@ -64,9 +66,9 @@ func TestResumableFPutObject(t *testing.T) { // Connect and make sure bucket exists. c, err := minio.New( - "play.minio.io:9002", - "Q3AM3UQ867SPQQA43P2F", - "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), false, ) if err != nil { @@ -77,12 +79,78 @@ func TestResumableFPutObject(t *testing.T) { c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") // Enable tracing, write to stdout. - // c.TraceOn(nil) + // c.TraceOn(os.Stderr) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) - // make a new bucket. + // Make a new bucket. + err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, err = io.CopyN(writer, crand.Reader, 128*1024) + if err != nil { + t.Fatal("Error:", err, bucketName) + } + i++ + } + writer.CloseWithError(errors.New("Proactively closed to be verified later.")) + }() + + objectName := bucketName + "-resumable" + _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + if err == nil { + t.Fatal("Error: PutObject should fail.") + } + if err.Error() != "Proactively closed to be verified later." { + t.Fatal("Error:", err) + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests resumable file based put object multipart upload. +func TestResumableFPutObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. err = c.MakeBucket(bucketName, "private", "us-east-1") if err != nil { t.Fatal("Error:", err, bucketName) @@ -93,7 +161,10 @@ func TestResumableFPutObject(t *testing.T) { t.Fatal("Error:", err) } - n, _ := io.CopyN(file, crand.Reader, 11*1024*1024) + n, err := io.CopyN(file, crand.Reader, 11*1024*1024) + if err != nil { + t.Fatal("Error:", err) + } if n != int64(11*1024*1024) { t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) } @@ -127,9 +198,10 @@ func TestResumableFPutObject(t *testing.T) { } } +// Tests resumable put object multipart upload. 
func TestResumablePutObject(t *testing.T) { if testing.Short() { - t.Skip("skipping resumable tests with short runs") + t.Skip("skipping functional tests for the short runs") } // Seed random based on current time. @@ -137,31 +209,31 @@ func TestResumablePutObject(t *testing.T) { // Connect and make sure bucket exists. c, err := minio.New( - "play.minio.io:9002", - "Q3AM3UQ867SPQQA43P2F", - "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), false, ) if err != nil { t.Fatal("Error:", err) } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + // Set user agent. c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(nil) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) - // make a new bucket. + // Make a new bucket. err = c.MakeBucket(bucketName, "private", "us-east-1") if err != nil { t.Fatal("Error:", err, bucketName) } - // generate 11MB + // Generate 11MB buf := make([]byte, 11*1024*1024) _, err = io.ReadFull(crand.Reader, buf) @@ -171,7 +243,7 @@ func TestResumablePutObject(t *testing.T) { objectName := bucketName + "-resumable" reader := bytes.NewReader(buf) - n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "application/octet-stream") + n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream") if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -190,37 +262,42 @@ func TestResumablePutObject(t *testing.T) { } } -func TestGetObjectPartialFunctional(t *testing.T) { +// Tests get object ReaderSeeker interface methods. +func TestGetObjectReadSeekFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + // Seed random based on current time. rand.Seed(time.Now().Unix()) // Connect and make sure bucket exists. c, err := minio.New( - "play.minio.io:9002", - "Q3AM3UQ867SPQQA43P2F", - "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), false, ) if err != nil { t.Fatal("Error:", err) } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + // Set user agent. c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(nil) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) - // make a new bucket. + // Make a new bucket. 
err = c.MakeBucket(bucketName, "private", "us-east-1") if err != nil { t.Fatal("Error:", err, bucketName) } - // generate data more than 32K + // Generate data more than 32K buf := make([]byte, rand.Intn(1<<20)+32*1024) _, err = io.ReadFull(crand.Reader, buf) @@ -228,9 +305,123 @@ func TestGetObjectPartialFunctional(t *testing.T) { t.Fatal("Error:", err) } - // save the data + // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano())) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if n != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + offset := int64(2048) + n, err = r.Seek(offset, 0) + if err != nil { + t.Fatal("Error:", err, offset) + } + if n != offset { + t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n", + offset, n) + } + n, err = r.Seek(0, 1) + if err != nil { + t.Fatal("Error:", err) + } + if n != offset { + t.Fatalf("Error: number of current seek does not match, want %v, got %v\n", + offset, n) + } + _, err = r.Seek(offset, 2) + if err == nil { + t.Fatal("Error: seek on positive offset for whence '2' should error out") + } + n, err = r.Seek(-offset, 2) + if err != nil { + t.Fatal("Error:", err) + } + if n != 0 { + t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n) + } + var buffer bytes.Buffer + if _, err = io.CopyN(&buffer, r, st.Size); err != nil { + t.Fatal("Error:", err) + } + if !bytes.Equal(buf, buffer.Bytes()) { + t.Fatal("Error: Incorrect read bytes v/s original buffer.") + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } +} + +// Tests get object ReaderAt interface methods. +func TestGetObjectReadAtFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Connect and make sure bucket exists. + c, err := minio.New( + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), + false, + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "private", "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + // Generate data more than 32K + buf := make([]byte, rand.Intn(1<<20)+32*1024) + + _, err = io.ReadFull(crand.Reader, buf) + if err != nil { + t.Fatal("Error:", err) + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano())) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -240,11 +431,15 @@ func TestGetObjectPartialFunctional(t *testing.T) { t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) } // read the data back - r, st, err := c.GetObjectPartial(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } if st.Size != int64(len(buf)) { t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", len(buf), st.Size) @@ -323,36 +518,41 @@ } } +// Tests comprehensive list of all methods. func TestFunctional(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for the short runs") + } + // Seed random based on current time. rand.Seed(time.Now().Unix()) c, err := minio.New( - "play.minio.io:9002", - "Q3AM3UQ867SPQQA43P2F", - "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "s3.amazonaws.com", + os.Getenv("ACCESS_KEY"), + os.Getenv("SECRET_KEY"), false, ) if err != nil { t.Fatal("Error:", err) } + // Enable to debug + // c.TraceOn(os.Stderr) + // Set user agent. c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(nil) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) - // make a new bucket. + // Make a new bucket. err = c.MakeBucket(bucketName, "private", "us-east-1") if err != nil { t.Fatal("Error:", err, bucketName) } - // generate a random file name. + // Generate a random file name. fileName := randString(60, rand.NewSource(time.Now().UnixNano())) file, err := os.Create(fileName) if err != nil { @@ -369,31 +569,34 @@ func TestFunctional(t *testing.T) { } file.Close() - // verify if bucket exits and you have access. + // Verify if bucket exists and you have access. err = c.BucketExists(bucketName) if err != nil { t.Fatal("Error:", err, bucketName) } - // make the bucket 'public read/write'. + // Make the bucket 'public read/write'. err = c.SetBucketACL(bucketName, "public-read-write") if err != nil { t.Fatal("Error:", err) } - // get the previously set acl. + // Get the previously set acl. acl, err := c.GetBucketACL(bucketName) if err != nil { t.Fatal("Error:", err) } - // acl must be 'public read/write'. + // ACL must be 'public read/write'. if acl != minio.BucketACL("public-read-write") { t.Fatal("Error:", acl) } - // list all buckets. + // List all buckets.
buckets, err := c.ListBuckets() + if len(buckets) == 0 { + t.Fatal("Error: list buckets cannot be empty", buckets) + } if err != nil { t.Fatal("Error:", err) } @@ -413,14 +616,14 @@ func TestFunctional(t *testing.T) { objectName := bucketName + "unique" - // generate data + // Generate data buf := make([]byte, rand.Intn(1<<19)) _, err = io.ReadFull(crand.Reader, buf) if err != nil { t.Fatal("Error: ", err) } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") if err != nil { t.Fatal("Error: ", err) } @@ -428,7 +631,7 @@ func TestFunctional(t *testing.T) { t.Fatal("Error: bad length ", n, len(buf)) } - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), -1, "binary/octet-stream") + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") if err != nil { t.Fatal("Error:", err, bucketName, objectName+"-nolength") } @@ -437,7 +640,34 @@ func TestFunctional(t *testing.T) { t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) } - newReader, _, err := c.GetObject(bucketName, objectName) + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + t.Fatal("Error: object " + objectName + " not found.") + } + + incompObjNotFound := true + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + t.Fatal("Error: unexpected dangling incomplete upload found.") + } + + newReader, err := c.GetObject(bucketName, objectName) if err != nil { t.Fatal("Error: ", err) } @@ -451,15 +681,7 @@ func TestFunctional(t *testing.T) { t.Fatal("Error: bytes mismatch.") } - n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain") - if err != nil { - t.Fatal("Error: ", err) - } - if n != totalSize { - t.Fatal("Error: bad length ", n, totalSize) - } - - err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f") + err = c.FGetObject(bucketName, objectName, fileName+"-f") if err != nil { t.Fatal("Error: ", err) } @@ -503,7 +725,7 @@ func TestFunctional(t *testing.T) { t.Fatal("Error: ", err) } - newReader, _, err = c.GetObject(bucketName, objectName+"-presigned") + newReader, err = c.GetObject(bucketName, objectName+"-presigned") if err != nil { t.Fatal("Error: ", err) } @@ -537,11 +759,11 @@ func TestFunctional(t *testing.T) { if err != nil { t.Fatal("Error:", err) } - err = c.RemoveBucket("bucket1") + err = c.RemoveBucket(bucketName) if err == nil { t.Fatal("Error:") } - if err.Error() != "The specified bucket does not exist." 
{ + if err.Error() != "The specified bucket does not exist" { t.Fatal("Error: ", err) } if err = os.Remove(fileName); err != nil { diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_unit_test.go similarity index 60% rename from Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go rename to Godeps/_workspace/src/github.com/minio/minio-go/api_unit_test.go index 2bda99f47..13afcdc45 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_unit_test.go @@ -17,11 +17,126 @@ package minio import ( + "fmt" + "net/http" "net/url" + "strings" "testing" ) -func TestSignature(t *testing.T) { +func TestEncodeURL2Path(t *testing.T) { + type urlStrings struct { + objName string + encodedObjName string + } + + bucketName := "bucketName" + want := []urlStrings{ + { + objName: "本語", + encodedObjName: "%E6%9C%AC%E8%AA%9E", + }, + { + objName: "本語.1", + encodedObjName: "%E6%9C%AC%E8%AA%9E.1", + }, + { + objName: ">123>3123123", + encodedObjName: "%3E123%3E3123123", + }, + { + objName: "test 1 2.txt", + encodedObjName: "test%201%202.txt", + }, + { + objName: "test++ 1.txt", + encodedObjName: "test%2B%2B%201.txt", + }, + } + + for _, o := range want { + u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName)) + if err != nil { + t.Fatal("Error:", err) + } + urlPath := "/" + bucketName + "/" + o.encodedObjName + if urlPath != encodeURL2Path(u) { + t.Fatal("Error") + } + } +} + +func TestErrorResponse(t *testing.T) { + var err error + err = ErrorResponse{ + Code: "Testing", + } + errResp := ToErrorResponse(err) + if errResp.Code != "Testing" { + t.Fatal("Type conversion failed, we have an empty struct.") + } + + // Test http response decoding. + var httpResponse *http.Response + // Set empty variables + httpResponse = nil + var bucketName, objectName string + + // Should fail with invalid argument. 
+ err = HTTPRespToErrorResponse(httpResponse, bucketName, objectName) + errResp = ToErrorResponse(err) + if errResp.Code != "InvalidArgument" { + t.Fatal("Empty response input should return invalid argument.") + } +} + +func TestSignatureCalculation(t *testing.T) { + req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil) + if err != nil { + t.Fatal("Error:", err) + } + req = SignV4(*req, "", "", "us-east-1") + if req.Header.Get("Authorization") != "" { + t.Fatal("Error: anonymous credentials should not have Authorization header.") + } + + req = PreSignV4(*req, "", "", "us-east-1", 0) + if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") { + t.Fatal("Error: anonymous credentials should not have Signature query resource.") + } + + req = SignV2(*req, "", "") + if req.Header.Get("Authorization") != "" { + t.Fatal("Error: anonymous credentials should not have Authorization header.") + } + + req = PreSignV2(*req, "", "", 0) + if strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatal("Error: anonymous credentials should not have Signature query resource.") + } + + req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1") + if req.Header.Get("Authorization") == "" { + t.Fatal("Error: normal credentials should have Authorization header.") + } + + req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0) + if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") { + t.Fatal("Error: normal credentials should have Signature query resource.") + } + + req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY") + if req.Header.Get("Authorization") == "" { + t.Fatal("Error: normal credentials should have Authorization header.") + } + + req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0) + if !strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatal("Error: normal credentials should have Signature query resource.") + } +} + +func TestSignatureType(t *testing.T) { clnt := Client{} if !clnt.signature.isV4() { t.Fatal("Error") diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml index 444696bc5..5b8824d45 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml +++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml @@ -26,8 +26,8 @@ build_script: - gofmt -s -l . - golint github.com/minio/minio-go... - deadcode - - go test - - go test -test.short -race + - go test -short -v + - go test -short -race -v # to disable automatic tests test: off diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go index 89c386ca1..d8eda0f54 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go @@ -16,7 +16,7 @@ package minio -// BucketACL - bucket level access control. +// BucketACL - Bucket level access control. type BucketACL string // Different types of ACL's currently supported for buckets. @@ -35,7 +35,7 @@ func (b BucketACL) String() string { return string(b) } -// isValidBucketACL - is provided acl string supported. +// isValidBucketACL - Is provided acl string supported. func (b BucketACL) isValidBucketACL() bool { switch true { case b.isPrivate(): return true @@ -47,29 +47,29 @@ func (b BucketACL) isValidBucketACL() bool { case b.isAuthenticated(): return true case b.String() == "private": - // by default its "private" + // By default it's "private" return true default: return false } } -// isPrivate - is acl Private.
+// isPrivate - Is acl Private. func (b BucketACL) isPrivate() bool { return b == bucketPrivate } -// isPublicRead - is acl PublicRead. +// isPublicRead - Is acl PublicRead. func (b BucketACL) isReadOnly() bool { return b == bucketReadOnly } -// isPublicReadWrite - is acl PublicReadWrite. +// isPublicReadWrite - Is acl PublicReadWrite. func (b BucketACL) isPublic() bool { return b == bucketPublic } -// isAuthenticated - is acl AuthenticatedRead. +// isAuthenticated - Is acl AuthenticatedRead. func (b BucketACL) isAuthenticated() bool { return b == bucketAuthenticated } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go index d0993ba4a..849aed9fe 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go @@ -24,25 +24,26 @@ import ( "sync" ) -// bucketLocationCache provides simple mechansim to hold bucket locations in memory. +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. type bucketLocationCache struct { - // Mutex is used for handling the concurrent - // read/write requests for cache + // mutex is used for handling the concurrent + // read/write requests for cache. sync.RWMutex // items holds the cached bucket locations. items map[string]string } -// newBucketLocationCache provides a new bucket location cache to be used -// internally with the client object. +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. func newBucketLocationCache() *bucketLocationCache { return &bucketLocationCache{ items: make(map[string]string), } } -// Get returns a value of a given key if it exists +// Get - Returns a value of a given key if it exists. func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { r.RLock() defer r.RUnlock() @@ -50,21 +51,21 @@ func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) return } -// Set will persist a value to the cache +// Set - Will persist a value into cache. func (r *bucketLocationCache) Set(bucketName string, location string) { r.Lock() defer r.Unlock() r.items[bucketName] = location } -// Delete deletes a bucket name. +// Delete - Deletes a bucket name from cache. func (r *bucketLocationCache) Delete(bucketName string) { r.Lock() defer r.Unlock() delete(r.items, bucketName) } -// getBucketLocation - get location for the bucketName from location map cache. +// getBucketLocation - Get location for the bucketName from location map cache. func (c Client) getBucketLocation(bucketName string) (string, error) { // For anonymous requests, default to "us-east-1" and let other calls // move forward. @@ -101,12 +102,12 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { } location := locationConstraint - // location is empty will be 'us-east-1'. + // If location is empty, default to 'us-east-1'. if location == "" { location = "us-east-1" } - // location can be 'EU' convert it to meaningful 'eu-west-1'. + // If location is 'EU', convert it to 'eu-west-1'. if location == "EU" { location = "eu-west-1" } @@ -118,7 +119,8 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { return location, nil } -// getBucketLocationRequest wrapper creates a new getBucketLocation request. +// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
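bucketLocationCache above is deliberately simple: an RWMutex around a map, shared by all requests of one client. A reduced standalone sketch of the same pattern (the type and names here are illustrative, not the library's):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // locationCache mirrors the bucketLocationCache pattern: concurrent
    // readers take the read lock, writers take the write lock.
    type locationCache struct {
    	sync.RWMutex
    	items map[string]string
    }

    func (c *locationCache) Get(bucket string) (string, bool) {
    	c.RLock()
    	defer c.RUnlock()
    	loc, ok := c.items[bucket]
    	return loc, ok
    }

    func (c *locationCache) Set(bucket, location string) {
    	c.Lock()
    	defer c.Unlock()
    	c.items[bucket] = location
    }

    func main() {
    	cache := &locationCache{items: make(map[string]string)}
    	cache.Set("my-bucketname", "eu-west-1")
    	if loc, ok := cache.Get("my-bucketname"); ok {
    		fmt.Println("cached location:", loc)
    	}
    }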
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { // Set location query. urlValues := make(url.Values) @@ -129,16 +130,16 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro targetURL.Path = filepath.Join(bucketName, "") targetURL.RawQuery = urlValues.Encode() - // get a new HTTP request for the method. + // Get a new HTTP request for the method. req, err := http.NewRequest("GET", targetURL.String(), nil) if err != nil { return nil, err } - // set UserAgent for the request. + // Set UserAgent for the request. c.setUserAgent(req) - // set sha256 sum for signature calculation only with signature version '4'. + // Set sha256 sum for signature calculation only with signature version '4'. if c.signature.isV4() { req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go b/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go deleted file mode 100644 index 636e06f6f..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha256" - "encoding/xml" - "io" -) - -// xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}) error { - d := xml.NewDecoder(body) - return d.Decode(v) -} - -// sum256 calculate sha256 sum for an input byte array. -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumMD5 calculate md5 sum for an input byte array. -func sumMD5(data []byte) []byte { - hash := md5.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumHMAC calculate hmac between two input byte array. -func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go index f4978019f..c97803b8d 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go @@ -25,14 +25,18 @@ const minimumPartSize = 1024 * 1024 * 5 // maxParts - maximum parts for a single multipart session. const maxParts = 10000 -// maxPartSize - maximum part size 5GiB for a single multipart upload operation. +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. const maxPartSize = 1024 * 1024 * 1024 * 5 -// maxSinglePutObjectSize - maximum size 5GiB of object per PUT operation. +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 -// maxMultipartPutObjectSize - maximum size 5TiB of object for Multipart operation. 
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
 const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5

-// optimalReadAtBufferSize - optimal buffer 5MiB used for reading through ReadAt operation.
+// optimalReadAtBufferSize - optimal buffer 5MiB used for reading
+// through ReadAt operation.
 const optimalReadAtBufferSize = 1024 * 1024 * 5
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
index 041a136c1..de0b12cc3 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
@@ -29,28 +29,34 @@ import (
 func main() {
 	// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.

-	// Requests are always secure by default. set inSecure=true to enable insecure access.
-	// inSecure boolean is the last argument for New().
+	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// This boolean value is the last argument for New().

-	// New provides a client object backend by automatically detected signature type based
-	// on the provider.
+	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+	// determined based on the Endpoint value.
 	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}

-	reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname")
+	reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
 	if err != nil {
 		log.Fatalln(err)
 	}
+	defer reader.Close()

-	localfile, err := os.Create("my-testfile")
+	localFile, err := os.Create("my-testfile")
 	if err != nil {
 		log.Fatalln(err)
 	}
-	defer localfile.Close()
+	defer localFile.Close()

-	if _, err = io.Copy(localfile, reader); err != nil {
+	stat, err := reader.Stat()
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
 		log.Fatalln(err)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go
deleted file mode 100644
index db65359ca..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package main - -import ( - "errors" - "io" - "log" - "os" - - "github.com/minio/minio-go" -) - -func main() { - // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values. - - // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. - // This boolean value is the last argument for New(). - - // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically - // determined based on the Endpoint value. - s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) - if err != nil { - log.Fatalln(err) - } - - reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname") - if err != nil { - log.Fatalln(err) - } - defer reader.Close() - - localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatalln(err) - } - defer localfile.Close() - - st, err := localFile.Stat() - if err != nil { - log.Fatalln(err) - } - - readAtOffset := st.Size() - readAtBuffer := make([]byte, 5*1024*1024) - - // Loop and write. - for { - readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset) - if rerr != nil { - if rerr != io.EOF { - log.Fatalln(rerr) - } - } - writeSize, werr := localFile.Write(readAtBuffer[:readAtSize]) - if werr != nil { - log.Fatalln(werr) - } - if readAtSize != writeSize { - log.Fatalln(errors.New("Something really bad happened here.")) - } - readAtOffset += int64(writeSize) - if rerr == io.EOF { - break - } - } - - // totalWritten size. - totalWritten := readAtOffset - - // If found mismatch error out. - if totalWritten != stat.Size { - log.Fatalln(errors.New("Something really bad happened here.")) - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go index d7efb7b43..073f75870 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go @@ -44,8 +44,7 @@ func main() { } defer object.Close() - st, _ := object.Stat() - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream") + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go deleted file mode 100644 index aff67f8e9..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build ignore - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package main
-
-import (
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	localFile, err := os.Open("testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	st, err := localFile.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer localFile.Close()
-
-	_, err = s3Client.PutObjectPartial("bucket-name", "objectName", localFile, st.Size(), "text/plain")
-	if err != nil {
-		log.Fatalln(err)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
index 0125491ab..9413dc5e5 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
@@ -35,23 +35,29 @@ func main() {

 	// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
 	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}

-	reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname")
+	reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
 	if err != nil {
 		log.Fatalln(err)
 	}
+	defer reader.Close()

-	localfile, err := os.Create("my-testfile")
+	localFile, err := os.Create("my-testfile")
 	if err != nil {
 		log.Fatalln(err)
 	}
-	defer localfile.Close()
+	defer localFile.Close()

-	if _, err = io.Copy(localfile, reader); err != nil {
+	stat, err := reader.Stat()
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
 		log.Fatalln(err)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go
deleted file mode 100644
index 2c32c8449..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package main - -import ( - "errors" - "io" - "log" - "os" - - "github.com/minio/minio-go" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and - // my-testfile are dummy values, please replace them with original values. - - // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. - // This boolean value is the last argument for New(). - - // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically - // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) - if err != nil { - log.Fatalln(err) - } - - reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname") - if err != nil { - log.Fatalln(err) - } - defer reader.Close() - - localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatalln(err) - } - defer localfile.Close() - - st, err := localFile.Stat() - if err != nil { - log.Fatalln(err) - } - - readAtOffset := st.Size() - readAtBuffer := make([]byte, 5*1024*1024) - - // For loop. - for { - readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset) - if rerr != nil { - if rerr != io.EOF { - log.Fatalln(rerr) - } - } - writeSize, werr := localFile.Write(readAtBuffer[:readAtSize]) - if werr != nil { - log.Fatalln(werr) - } - if readAtSize != writeSize { - log.Fatalln(errors.New("Something really bad happened here.")) - } - readAtOffset += int64(writeSize) - if rerr == io.EOF { - break - } - } - - // totalWritten size. - totalWritten := readAtOffset - - // If found mismatch error out. - if totalWritten != stat.Size { - log.Fatalln(errors.New("Something really bad happened here.")) - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go index 963060487..2ba90a697 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go @@ -45,8 +45,7 @@ func main() { } defer object.Close() - st, _ := object.Stat() - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream") + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") if err != nil { log.Fatalln(err) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go deleted file mode 100644 index e59b2ad4d..000000000 --- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build ignore - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "log" - "os" - - "github.com/minio/minio-go" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and - // my-testfile are dummy values, please replace them with original values. - - // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. - // This boolean value is the last argument for New(). - - // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically - // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) - if err != nil { - log.Fatalln(err) - } - - localFile, err := os.Open("my-testfile") - if err != nil { - log.Fatalln(err) - } - - st, err := localFile.Stat() - if err != nil { - log.Fatalln(err) - } - defer localFile.Close() - - _, err = s3Client.PutObjectPartial("my-bucketname", "my-objectname", localFile, st.Size(), "text/plain") - if err != nil { - log.Fatalln(err) - } -} diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go index 2d3082755..2a675d770 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go @@ -2,7 +2,6 @@ package minio import ( "encoding/base64" - "errors" "fmt" "strings" "time" @@ -11,7 +10,8 @@ import ( // expirationDateFormat date format for expiration key in json policy. const expirationDateFormat = "2006-01-02T15:04:05.999Z" -// policyCondition explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // // Example: // @@ -27,11 +27,15 @@ type policyCondition struct { value string } -// PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string. +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. type PostPolicy struct { - expiration time.Time // expiration date and time of the POST policy. - conditions []policyCondition // collection of different policy conditions. - // contentLengthRange minimum and maximum allowable size for the uploaded content. + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. contentLengthRange struct { min int64 max int64 @@ -41,7 +45,7 @@ type PostPolicy struct { formData map[string]string } -// NewPostPolicy instantiate new post policy. +// NewPostPolicy - Instantiate new post policy. func NewPostPolicy() *PostPolicy { p := &PostPolicy{} p.conditions = make([]policyCondition, 0) @@ -49,19 +53,19 @@ func NewPostPolicy() *PostPolicy { return p } -// SetExpires expiration time. +// SetExpires - Sets expiration time for the new policy. func (p *PostPolicy) SetExpires(t time.Time) error { if t.IsZero() { - return errors.New("No expiry time set.") + return ErrInvalidArgument("No expiry time set.") } p.expiration = t return nil } -// SetKey Object name. +// SetKey - Sets an object name for the policy based upload. 
func (p *PostPolicy) SetKey(key string) error {
 	if strings.TrimSpace(key) == "" || key == "" {
-		return errors.New("Object name is not specified.")
+		return ErrInvalidArgument("Object name is empty.")
 	}
 	policyCond := policyCondition{
 		matchType: "eq",
@@ -75,10 +79,11 @@ func (p *PostPolicy) SetKey(key string) error {
 	return nil
 }

-// SetKeyStartsWith Object name that can start with.
+// SetKeyStartsWith - Sets an object name prefix that a policy-based
+// upload can start with.
 func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
 	if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
-		return errors.New("Object prefix is not specified.")
+		return ErrInvalidArgument("Object prefix is empty.")
 	}
 	policyCond := policyCondition{
 		matchType: "starts-with",
@@ -92,10 +97,10 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
 	return nil
 }

-// SetBucket bucket name.
+// SetBucket - Sets the bucket into which objects will be uploaded.
 func (p *PostPolicy) SetBucket(bucketName string) error {
 	if strings.TrimSpace(bucketName) == "" || bucketName == "" {
-		return errors.New("Bucket name is not specified.")
+		return ErrInvalidArgument("Bucket name is empty.")
 	}
 	policyCond := policyCondition{
 		matchType: "eq",
@@ -109,10 +114,11 @@ func (p *PostPolicy) SetBucket(bucketName string) error {
 	return nil
 }

-// SetContentType content-type.
+// SetContentType - Sets the content-type of the object for this
+// policy-based upload.
 func (p *PostPolicy) SetContentType(contentType string) error {
 	if strings.TrimSpace(contentType) == "" || contentType == "" {
-		return errors.New("No content type specified.")
+		return ErrInvalidArgument("No content type specified.")
 	}
 	policyCond := policyCondition{
 		matchType: "eq",
@@ -126,16 +132,17 @@ func (p *PostPolicy) SetContentType(contentType string) error {
 	return nil
 }

-// SetContentLengthRange - set new min and max content length condition.
+// SetContentLengthRange - Sets new min and max content length
+// condition for all incoming uploads.
 func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
 	if min > max {
-		return errors.New("minimum limit is larger than maximum limit")
+		return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
 	}
 	if min < 0 {
-		return errors.New("minimum limit cannot be negative")
+		return ErrInvalidArgument("Minimum limit cannot be negative.")
 	}
 	if max < 0 {
-		return errors.New("maximum limit cannot be negative")
+		return ErrInvalidArgument("Maximum limit cannot be negative.")
 	}
 	p.contentLengthRange.min = min
 	p.contentLengthRange.max = max
@@ -145,18 +152,18 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
 // addNewPolicy - internal helper to validate adding new policies.
 func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
 	if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
-		return errors.New("Policy fields empty.")
+		return ErrInvalidArgument("Policy fields are empty.")
 	}
 	p.conditions = append(p.conditions, policyCond)
 	return nil
 }

-// Stringer interface for printing in pretty manner.
+// Stringer interface for printing the policy as a JSON formatted string.
 func (p PostPolicy) String() string {
 	return string(p.marshalJSON())
 }

-// marshalJSON provides Marshalled JSON.
+// marshalJSON - Provides marshalled JSON in bytes.
func (p PostPolicy) marshalJSON() []byte { expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` var conditionsStr string @@ -178,7 +185,7 @@ func (p PostPolicy) marshalJSON() []byte { return []byte(retStr) } -// base64 produces base64 of PostPolicy's Marshalled json. +// base64 - Produces base64 of PostPolicy's Marshalled json. func (p PostPolicy) base64() string { return base64.StdEncoding.EncodeToString(p.marshalJSON()) } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go index 956b04f23..055fd8598 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go @@ -30,7 +30,7 @@ import ( "time" ) -// signature and API related constants. +// Signature and API related constants. const ( signV2Algorithm = "AWS" ) @@ -55,14 +55,14 @@ func encodeURL2Path(u *url.URL) (path string) { } // PreSignV2 - presign the request in following style. -// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { - // presign is a noop for anonymous credentials. + // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { - return nil + return &req } d := time.Now().UTC() - // Add date if not present + // Add date if not present. if date := req.Header.Get("Date"); date == "" { req.Header.Set("Date", d.Format(http.TimeFormat)) } @@ -73,12 +73,12 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in // Find epoch expires when the request will expire. epochExpires := d.Unix() + expires - // get string to sign. + // Get string to sign. stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) - // calculate signature. + // Calculate signature. signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) query := req.URL.Query() @@ -98,7 +98,8 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in return &req } -// PostPresignSignatureV2 - presigned signature for PostPolicy request +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(policyBase64)) @@ -124,6 +125,11 @@ func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { // SignV2 sign the request before Do() (AWS Signature Version 2). func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + // Initial time. d := time.Now().UTC() @@ -160,11 +166,11 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request // CanonicalizedResource; func getStringToSignV2(req http.Request) string { buf := new(bytes.Buffer) - // write standard headers. + // Write standard headers. writeDefaultHeaders(buf, req) - // write canonicalized protocol headers if any. 
+	// Write canonicalized protocol headers if any.
 	writeCanonicalizedHeaders(buf, req)
-	// write canonicalized Query resources if any.
+	// Write canonicalized Query resources if any.
 	writeCanonicalizedResource(buf, req)
 	return buf.String()
 }
@@ -186,7 +192,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
 	var protoHeaders []string
 	vals := make(map[string][]string)
 	for k, vv := range req.Header {
-		// all the AMZ and GOOG headers should be lowercase
+		// All the AMZ headers should be lowercase.
 		lk := strings.ToLower(k)
 		if strings.HasPrefix(lk, "x-amz") {
 			protoHeaders = append(protoHeaders, lk)
@@ -246,6 +252,7 @@ var resourceList = []string{
 // 	 +
 // 	 [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
 func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
+	// Save request URL.
 	requestURL := req.URL

 	// Get encoded URL path.
@@ -256,20 +263,21 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
 	if requestURL.RawQuery != "" {
 		var n int
 		vals, _ := url.ParseQuery(requestURL.RawQuery)
-		// loop through all the supported resourceList.
+		// Verify if any sub-resource queries are present, if yes
+		// canonicalize them.
 		for _, resource := range resourceList {
 			if vv, ok := vals[resource]; ok && len(vv) > 0 {
 				n++
-				// first element
+				// First element
 				switch n {
 				case 1:
 					buf.WriteByte('?')
-				// the rest
+				// The rest
 				default:
 					buf.WriteByte('&')
 				}
 				buf.WriteString(resource)
-				// request parameters
+				// Request parameters
 				if len(vv[0]) > 0 {
 					buf.WriteByte('=')
 					buf.WriteString(url.QueryEscape(vv[0]))
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go
index 515d8ab18..27c292a55 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go
@@ -26,7 +26,7 @@ import (
 	"time"
 )

-// signature and API related constants.
+// Signature and API related constants.
 const (
 	signV4Algorithm   = "AWS4-HMAC-SHA256"
 	iso8601DateFormat = "20060102T150405Z"
@@ -34,28 +34,35 @@ const (
 )

 ///
-/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+/// Excerpts from @lsegal -
+/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
 ///
 ///  User-Agent:
 ///
-///      This is ignored from signing because signing this causes problems with generating pre-signed URLs
-///      (that are executed by other agents) or when customers pass requests through proxies, which may
-///      modify the user-agent.
+///      This is ignored from signing because signing this causes
+///      problems with generating pre-signed URLs (that are executed
+///      by other agents) or when customers pass requests through
+///      proxies, which may modify the user-agent.
 ///
 ///  Content-Length:
 ///
-///      This is ignored from signing because generating a pre-signed URL should not provide a content-length
-///      constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
-///      sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
-///      implicitly validates the payload length (since changing the number of bytes would change the checksum)
+///      This is ignored from signing because generating a pre-signed
+///      URL should not provide a content-length constraint,
+///      specifically when vending a S3 pre-signed PUT URL.
The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) /// and therefore this header is not valuable in the signature. /// /// Content-Type: /// -/// Signing this header causes quite a number of problems in browser environments, where browsers -/// like to modify and normalize the content-type header in different ways. There is more information -/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic -/// and reduces the possibility of future bugs +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. /// /// Authorization: /// @@ -68,7 +75,7 @@ var ignoredHeaders = map[string]bool{ "User-Agent": true, } -// getSigningKey hmac seed to calculate final signature +// getSigningKey hmac seed to calculate final signature. func getSigningKey(secret, loc string, t time.Time) []byte { date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) location := sumHMAC(date, []byte(loc)) @@ -77,12 +84,13 @@ func getSigningKey(secret, loc string, t time.Time) []byte { return signingKey } -// getSignature final signature in hexadecimal form +// getSignature final signature in hexadecimal form. func getSignature(signingKey []byte, stringToSign string) string { return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) } -// getScope generate a string of a specific date, an AWS region, and a service +// getScope generate a string of a specific date, an AWS region, and a +// service. func getScope(location string, t time.Time) string { scope := strings.Join([]string{ t.Format(yyyymmdd), @@ -93,13 +101,14 @@ func getScope(location string, t time.Time) string { return scope } -// getCredential generate a credential string +// getCredential generate a credential string. func getCredential(accessKeyID, location string, t time.Time) string { scope := getScope(location, t) return accessKeyID + "/" + scope } -// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. func getHashedPayload(req http.Request) string { hashedPayload := req.Header.Get("X-Amz-Content-Sha256") if hashedPayload == "" { @@ -109,7 +118,8 @@ func getHashedPayload(req http.Request) string { return hashedPayload } -// getCanonicalHeaders generate a list of request headers for signature. +// getCanonicalHeaders generate a list of request headers for +// signature. func getCanonicalHeaders(req http.Request) string { var headers []string vals := make(map[string][]string) @@ -124,6 +134,8 @@ func getCanonicalHeaders(req http.Request) string { sort.Strings(headers) var buf bytes.Buffer + // Save all the headers in canonical form
: newline
+	// separated for each header.
 	for _, k := range headers {
 		buf.WriteString(k)
 		buf.WriteByte(':')
@@ -145,12 +157,13 @@ func getCanonicalHeaders(req http.Request) string {
 }

 // getSignedHeaders generate all signed request headers.
-// i.e alphabetically sorted, semicolon-separated list of lowercase request header names
+// i.e. lexically sorted, semicolon-separated list of lowercase
+// request header names.
 func getSignedHeaders(req http.Request) string {
 	var headers []string
 	for k := range req.Header {
 		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // ignored header
+			continue // Ignored header found, continue.
 		}
 		headers = append(headers, strings.ToLower(k))
 	}
@@ -168,7 +181,6 @@ func getSignedHeaders(req http.Request) string {
 //  <CanonicalHeaders>\n
 //  <SignedHeaders>\n
 //  <HashedPayload>
-//
 func getCanonicalRequest(req http.Request) string {
 	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
 	canonicalRequest := strings.Join([]string{
@@ -193,20 +205,21 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
 // PreSignV4 presign the request, in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
 func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
-	// presign is a noop for anonymous credentials.
+	// Presign is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
-		return nil
+		return &req
 	}
+
 	// Initial time.
 	t := time.Now().UTC()

-	// get credential string.
+	// Get credential string.
 	credential := getCredential(accessKeyID, location, t)

 	// Get all signed headers.
 	signedHeaders := getSignedHeaders(req)

-	// set URL query.
+	// Set URL query.
 	query := req.URL.Query()
 	query.Set("X-Amz-Algorithm", signV4Algorithm)
 	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
@@ -221,10 +234,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	// Get string to sign from canonical request.
 	stringToSign := getStringToSignV4(t, location, canonicalRequest)

-	// get hmac signing key.
+	// Get hmac signing key.
 	signingKey := getSigningKey(secretAccessKey, location, t)

-	// calculate signature.
+	// Calculate signature.
 	signature := getSignature(signingKey, stringToSign)

 	// Add signature header to RawQuery.
@@ -233,9 +246,12 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	return &req
 }

-// PostPresignSignatureV4 - presigned signature for PostPolicy requests.
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
 func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+	// Get signing key.
 	signingkey := getSigningKey(secretAccessKey, location, t)
+	// Calculate signature.
 	signature := getSignature(signingkey, policyBase64)
 	return signature
 }
@@ -243,6 +259,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
 // SignV4 sign the request before Do(), in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
 func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
 	// Initial time.
t := time.Now().UTC() @@ -255,19 +276,19 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht // Get string to sign from canonical request. stringToSign := getStringToSignV4(t, location, canonicalRequest) - // get hmac signing key. + // Get hmac signing key. signingKey := getSigningKey(secretAccessKey, location, t) - // get credential string. + // Get credential string. credential := getCredential(accessKeyID, location, t) // Get all signed headers. signedHeaders := getSignedHeaders(req) - // calculate signature. + // Calculate signature. signature := getSignature(signingKey, stringToSign) - // if regular request, construct the final authorization header. + // If regular request, construct the final authorization header. parts := []string{ signV4Algorithm + " Credential=" + credential, "SignedHeaders=" + signedHeaders, diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go index 8eec3f0eb..cae74cd01 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go @@ -1,3 +1,19 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package minio // SignatureType is type of Authorization requested for a given HTTP request. diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go index e9fada3e6..65c7b0da1 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go @@ -37,7 +37,7 @@ func newTempFile(prefix string) (*tempFile, error) { } return &tempFile{ File: file, - mutex: new(sync.Mutex), + mutex: &sync.Mutex{}, }, nil } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/utils.go b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go index 2e2532b6c..2d92fc8bc 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/utils.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go @@ -17,7 +17,10 @@ package minio import ( + "crypto/hmac" + "crypto/sha256" "encoding/hex" + "encoding/xml" "io" "io/ioutil" "net" @@ -29,6 +32,26 @@ import ( "unicode/utf8" ) +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + // isPartUploaded - true if part is already uploaded. 
func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) { _, isUploaded = objectParts[objPart.PartNumber] @@ -261,7 +284,6 @@ func isValidObjectPrefix(objectPrefix string) error { // - if input object size is -1 then return maxPartSize. // - if it happens to be that partSize is indeed bigger // than the maximum part size just return maxPartSize. -// func optimalPartSize(objectSize int64) int64 { // if object size is -1 choose part size as 5GiB. if objectSize == -1 { From 289aee94489a15dbc234abed87e678e7057893ac Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 7 Jan 2016 21:06:54 +0100 Subject: [PATCH 49/55] Adapt s3 backend to new library --- backend/s3/s3.go | 42 +++++++++++++++++------------------------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 6872313c5..8aa4f3065 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -3,7 +3,6 @@ package s3 import ( "bytes" "errors" - "fmt" "io" "strings" @@ -120,7 +119,7 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error { <-bb.b.connChan debug.Log("s3.Finalize", "PutObject(%v, %v, %v, %v)", bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream") - n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream") + n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, "binary/octet-stream") debug.Log("s3.Finalize", "finalized %v -> n %v, err %#v", path, n, err) bb.b.connChan <- struct{}{} @@ -150,9 +149,13 @@ func (be *S3Backend) Create() (backend.Blob, error) { // name. The reader should be closed after draining it. func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { path := s3path(t, name) - rc, _, err := be.client.GetObject(be.bucketname, path) + rc, err := be.client.GetObject(be.bucketname, path) debug.Log("s3.Get", "%v %v -> err %v", t, name, err) - return rc, err + if err != nil { + return nil, err + } + + return rc, nil } // GetReader returns an io.ReadCloser for the Blob with the given name of @@ -160,35 +163,24 @@ func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length) path := s3path(t, name) - rd, stat, err := be.client.GetObjectPartial(be.bucketname, path) - debug.Log("s3.GetReader", " stat %v, err %v", stat, err) + obj, err := be.client.GetObject(be.bucketname, path) if err != nil { + debug.Log("s3.GetReader", " err %v", err) return nil, err } - l, o := int64(length), int64(offset) - - if l == 0 { - l = stat.Size + if offset > 0 { + _, err = obj.Seek(int64(offset), 0) + if err != nil { + return nil, err + } } - if o > stat.Size { - return nil, fmt.Errorf("offset beyond end of file (%v > %v)", o, stat.Size) + if length == 0 { + return obj, nil } - if o+l > stat.Size { - l = stat.Size - o - } - - debug.Log("s3.GetReader", "%v %v, o %v l %v", t, name, o, l) - - var r io.Reader - r = &ContinuousReader{R: rd, Offset: o} - if length > 0 { - r = io.LimitReader(r, int64(length)) - } - - return backend.ReadCloser(r), nil + return backend.LimitReadCloser(obj, int64(length)), nil } // Test returns true if a blob of the given type and name exists in the backend. 
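
For reference, the adapted GetReader above leans on the fact that the object returned by the new GetObject is seekable: a ranged read is just a Seek to the offset followed by a length-limited copy. A minimal sketch of that pattern outside the backend follows; the endpoint, credentials, bucket and object names are placeholders, not values from the patch.

	package main

	import (
		"io"
		"log"
		"os"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder endpoint and credentials; substitute real values.
		s3Client, err := minio.New("play.minio.io:9002", "ACCESS-KEY", "SECRET-KEY", false)
		if err != nil {
			log.Fatalln(err)
		}

		obj, err := s3Client.GetObject("my-bucketname", "my-objectname")
		if err != nil {
			log.Fatalln(err)
		}
		defer obj.Close()

		// Skip the first 512 bytes of the object.
		if _, err := obj.Seek(512, 0); err != nil {
			log.Fatalln(err)
		}
		// Copy at most 1024 bytes starting at the current offset.
		if _, err := io.Copy(os.Stdout, io.LimitReader(obj, 1024)); err != nil {
			log.Fatalln(err)
		}
	}
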
From 6a56d5b87b75d017aa93e748b3b0bac051f84632 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 7 Jan 2016 21:06:45 +0100 Subject: [PATCH 50/55] Repo: Add more debug --- repository/key.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/repository/key.go b/repository/key.go index 40e75f11d..22ed2ca2e 100644 --- a/repository/key.go +++ b/repository/key.go @@ -13,6 +13,7 @@ import ( "github.com/restic/restic/backend" "github.com/restic/restic/crypto" + "github.com/restic/restic/debug" ) var ( @@ -58,6 +59,7 @@ func createMasterKey(s *Repository, password string) (*Key, error) { func OpenKey(s *Repository, name string, password string) (*Key, error) { k, err := LoadKey(s, name) if err != nil { + debug.Log("OpenKey", "LoadKey(%v) returned error %v", name[:12], err) return nil, err } @@ -82,6 +84,7 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) { k.master = &crypto.Key{} err = json.Unmarshal(buf, k.master) if err != nil { + debug.Log("OpenKey", "Unmarshal() returned error %v", err) return nil, err } k.name = name @@ -100,11 +103,14 @@ func SearchKey(s *Repository, password string) (*Key, error) { done := make(chan struct{}) defer close(done) for name := range s.Backend().List(backend.Key, done) { + debug.Log("SearchKey", "trying key %v", name[:12]) key, err := OpenKey(s, name, password) if err != nil { + debug.Log("SearchKey", "key %v returned error %v", name[:12], err) continue } + debug.Log("SearchKey", "successfully opened key %v", name[:12]) return key, nil } From 1483e15e4ec49d6995da6ab4e23b11396fd1949c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 8 Jan 2016 21:01:06 +0100 Subject: [PATCH 51/55] Update s3 library (again) --- Godeps/Godeps.json | 4 +- .../src/github.com/minio/minio-go/api-get.go | 41 +++-- .../minio/minio-go/api-put-object-common.go | 167 ++++++++++++++++++ .../minio/minio-go/api-put-object-file.go | 90 ++-------- .../minio-go/api-put-object-multipart.go | 83 ++------- .../minio/minio-go/api-put-object-readat.go | 26 ++- .../minio/minio-go/api_functional_v2_test.go | 84 +++++++++ .../minio/minio-go/api_functional_v4_test.go | 84 +++++++++ 8 files changed, 398 insertions(+), 181 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-common.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 03b9fcb05..b4ae781ee 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -24,8 +24,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v0.2.5-205-g38be406", - "Rev": "38be40605dc37d2d7ec06169218365b46ae33e4b" + "Comment": "v0.2.5-209-g77f35ea", + "Rev": "77f35ea56099f50b0425d0e2f3949773dae723c0" }, { "ImportPath": "github.com/pkg/sftp", diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go index 46643a5c7..ca00eaa15 100644 --- a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go @@ -228,6 +228,9 @@ type Object struct { currOffset int64 objectInfo ObjectInfo + // Keeps track of closed call. + isClosed bool + // Previous error saved for future calls. prevErr error } @@ -244,16 +247,16 @@ func (o *Object) Read(b []byte) (n int, err error) { o.mutex.Lock() defer o.mutex.Unlock() + // Previous prevErr is which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // If current offset has reached Size limit, return EOF. 
if o.currOffset >= o.objectInfo.Size { return 0, io.EOF } - // Previous prevErr is which was saved in previous operation. - if o.prevErr != nil { - return 0, o.prevErr - } - // Send current information over control channel to indicate we // are ready. reqMsg := readRequest{} @@ -297,7 +300,7 @@ func (o *Object) Stat() (ObjectInfo, error) { o.mutex.Lock() defer o.mutex.Unlock() - if o.prevErr != nil { + if o.prevErr != nil || o.isClosed { return ObjectInfo{}, o.prevErr } @@ -317,17 +320,17 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { o.mutex.Lock() defer o.mutex.Unlock() + // prevErr is which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // If offset is negative and offset is greater than or equal to // object size we return EOF. if offset < 0 || offset >= o.objectInfo.Size { return 0, io.EOF } - // prevErr is which was saved in previous operation. - if o.prevErr != nil { - return 0, o.prevErr - } - // Send current information over control channel to indicate we // are ready. reqMsg := readRequest{} @@ -386,11 +389,11 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { // Negative offset is valid for whence of '2'. if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Object: negative position not allowed for %d.", whence)) + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) } switch whence { default: - return 0, ErrInvalidArgument(fmt.Sprintf("Object: invalid whence %d", whence)) + return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) case 0: if offset > o.objectInfo.Size { return 0, io.EOF @@ -410,7 +413,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { } // Seeking to negative position not allowed for whence. if o.objectInfo.Size+offset < 0 { - return 0, ErrInvalidArgument(fmt.Sprintf("Object: Seeking at negative offset not allowed for %d", whence)) + return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) } o.currOffset += offset } @@ -428,17 +431,19 @@ func (o *Object) Close() (err error) { o.mutex.Lock() defer o.mutex.Unlock() - // prevErr is which was saved in previous operation. - if o.prevErr != nil { + // if already closed return an error. + if o.isClosed { return o.prevErr } // Close successfully. close(o.doneCh) - // Save this for any subsequent frivolous reads. - errMsg := "Object: Is already closed. Bad file descriptor." + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true return nil } diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-common.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-common.go new file mode 100644 index 000000000..2be4d202c --- /dev/null +++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-common.go @@ -0,0 +1,167 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/md5"
+	"crypto/sha256"
+	"hash"
+	"io"
+	"os"
+)
+
+// Verify if reader is *os.File
+func isFile(reader io.Reader) (ok bool) {
+	_, ok = reader.(*os.File)
+	return
+}
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+	_, ok = reader.(*Object)
+	return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+	_, ok = reader.(io.ReaderAt)
+	return
+}
+
+// hashCopyN - Calculates Md5sum and SHA256sum for up to partSize bytes.
+func (c Client) hashCopyN(writer io.ReadWriteSeeker, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
+	// MD5 and SHA256 hasher.
+	var hashMD5, hashSHA256 hash.Hash
+	hashMD5 = md5.New()
+	hashWriter := io.MultiWriter(writer, hashMD5)
+	if c.signature.isV4() {
+		hashSHA256 = sha256.New()
+		hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
+	}
+
+	// Copies to input at writer.
+	size, err = io.CopyN(hashWriter, reader, partSize)
+	if err != nil {
+		// If not EOF return error right here.
+		if err != io.EOF {
+			return nil, nil, 0, err
+		}
+	}
+
+	// Seek back to beginning of input, any error fail right here.
+	if _, err := writer.Seek(0, 0); err != nil {
+		return nil, nil, 0, err
+	}
+
+	// Finalize md5sum and sha256 sum.
+	md5Sum = hashMD5.Sum(nil)
+	if c.signature.isV4() {
+		sha256Sum = hashSHA256.Sum(nil)
+	}
+	return md5Sum, sha256Sum, size, err
+}
+
+// getUploadID - fetch upload id if already present for an object name
+// or initiate a new request to fetch a new upload id.
+func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", false, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return "", false, err
+	}
+
+	// Set content Type to default if empty string.
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	// Find upload id for previous upload for an object.
+	uploadID, err = c.findUploadID(bucketName, objectName)
+	if err != nil {
+		return "", false, err
+	}
+	if uploadID == "" {
+		// Initiate multipart upload for an object.
+		initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+		if err != nil {
+			return "", false, err
+		}
+		// Save the new upload id.
+		uploadID = initMultipartUploadResult.UploadID
+		// Indicate that this is a new upload id.
+		isNew = true
+	}
+	return uploadID, isNew, nil
+}
+
+// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
+func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
+	// MD5 and SHA256 hasher.
+	var hashMD5, hashSHA256 hash.Hash
+	hashMD5 = md5.New()
+	hashWriter := io.MultiWriter(hashMD5)
+	if c.signature.isV4() {
+		hashSHA256 = sha256.New()
+		hashWriter = io.MultiWriter(hashMD5, hashSHA256)
+	}
+
+	size, err = io.Copy(hashWriter, reader)
+	if err != nil {
+		return nil, nil, 0, err
+	}
+
+	// Seek back reader to the beginning location.
+	if _, err := reader.Seek(0, 0); err != nil {
+		return nil, nil, 0, err
+	}
+
+	// Finalize md5sum and sha256 sum.
+	md5Sum = hashMD5.Sum(nil)
+	if c.signature.isV4() {
+		sha256Sum = hashSHA256.Sum(nil)
+	}
+	return md5Sum, sha256Sum, size, nil
+}
+
+// Fetch all parts info, including total uploaded size, maximum part
+// size and max part number.
+func (c Client) getPartsInfo(bucketName, objectName, uploadID string) (prtsInfo map[int]objectPart, totalSize int64, maxPrtSize int64, maxPrtNumber int, err error) {
+	// Fetch previously uploaded parts.
+	prtsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return nil, 0, 0, 0, err
+	}
+	// Peek through all the parts and calculate totalSize, maximum
+	// part size and last part number.
+	for _, prtInfo := range prtsInfo {
+		// Save previously uploaded size.
+		totalSize += prtInfo.Size
+		// Choose the maximum part size.
+		if prtInfo.Size >= maxPrtSize {
+			maxPrtSize = prtInfo.Size
+		}
+		// Choose the maximum part number.
+		if maxPrtNumber < prtInfo.PartNumber {
+			maxPrtNumber = prtInfo.PartNumber
+		}
+	}
+	return prtsInfo, totalSize, maxPrtSize, maxPrtNumber, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go
index 5bc92d3bc..e8b883b9a 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-file.go
@@ -17,80 +17,14 @@
 package minio

 import (
-	"crypto/md5"
-	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
-	"hash"
 	"io"
 	"io/ioutil"
 	"os"
 	"sort"
 )

-// getUploadID - fetch upload id if already present for an object name
-// or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) {
-	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
-		return "", err
-	}
-	if err := isValidObjectName(objectName); err != nil {
-		return "", err
-	}
-
-	// Set content Type to default if empty string.
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	// Find upload id for previous upload for an object.
-	uploadID, err := c.findUploadID(bucketName, objectName)
-	if err != nil {
-		return "", err
-	}
-	if uploadID == "" {
-		// Initiate multipart upload for an object.
-		initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
-		if err != nil {
-			return "", err
-		}
-		// Save the new upload id.
-		uploadID = initMultipartUploadResult.UploadID
-	}
-	return uploadID, nil
-}
-
-// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
-func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
-	// MD5 and SHA256 hasher.
-	var hashMD5, hashSHA256 hash.Hash
-	// MD5 and SHA256 hasher.
-	hashMD5 = md5.New()
-	hashWriter := io.MultiWriter(hashMD5)
-	if c.signature.isV4() {
-		hashSHA256 = sha256.New()
-		hashWriter = io.MultiWriter(hashMD5, hashSHA256)
-	}
-
-	size, err = io.Copy(hashWriter, reader)
-	if err != nil {
-		return nil, nil, 0, err
-	}
-
-	// Seek back reader to the beginning location.
-	if _, err := reader.Seek(0, 0); err != nil {
-		return nil, nil, 0, err
-	}
-
-	// Finalize md5shum and sha256 sum.
-	md5Sum = hashMD5.Sum(nil)
-	if c.signature.isV4() {
-		sha256Sum = hashSHA256.Sum(nil)
-	}
-	return md5Sum, sha256Sum, size, nil
-}
-
 // FPutObject - Create an object in a bucket, with contents from file at filePath.
 func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
 	// Input validation.
@@ -194,7 +128,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe

 	// Get upload id for an object, initiates a new multipart request
 	// if it cannot find any previously partially uploaded object.
-	uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
 	if err != nil {
 		return 0, err
 	}
@@ -205,19 +139,19 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 	// Complete multipart upload.
 	var completeMultipartUpload completeMultipartUpload

-	// Fetch previously upload parts.
-	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
-	if err != nil {
-		return 0, err
-	}
-
 	// Previous maximum part size
 	var prevMaxPartSize int64
-	// Loop through all parts and fetch prevMaxPartSize.
-	for _, partInfo := range partsInfo {
-		// Choose the maximum part size.
-		if partInfo.Size >= prevMaxPartSize {
-			prevMaxPartSize = partInfo.Size
+
+	// A map of all uploaded parts.
+	var partsInfo = make(map[int]objectPart)
+
+	// If this session is a continuation of a previous session, fetch all
+	// previously uploaded parts info.
+	if !isNew {
+		// Fetch previously uploaded parts and maximum part size.
+		partsInfo, _, prevMaxPartSize, _, err = c.getPartsInfo(bucketName, objectName, uploadID)
+		if err != nil {
+			return 0, err
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go
index 6cacc9800..4b757f081 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-multipart.go
@@ -18,11 +18,8 @@ package minio

 import (
 	"bytes"
-	"crypto/md5"
-	"crypto/sha256"
 	"encoding/hex"
 	"encoding/xml"
-	"hash"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -33,58 +30,6 @@ import (
 	"strings"
 )

-// Verify if reader is *os.File
-func isFile(reader io.Reader) (ok bool) {
-	_, ok = reader.(*os.File)
-	return
-}
-
-// Verify if reader is *minio.Object
-func isObject(reader io.Reader) (ok bool) {
-	_, ok = reader.(*Object)
-	return
-}
-
-// Verify if reader is a generic ReaderAt
-func isReadAt(reader io.Reader) (ok bool) {
-	_, ok = reader.(io.ReaderAt)
-	return
-}
-
-// hashCopyN - Calculates Md5sum and SHA256sum for upto partSize amount of bytes.
-func (c Client) hashCopyN(writer io.ReadWriteSeeker, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
-	// MD5 and SHA256 hasher.
-	var hashMD5, hashSHA256 hash.Hash
-	// MD5 and SHA256 hasher.
-	hashMD5 = md5.New()
-	hashWriter := io.MultiWriter(writer, hashMD5)
-	if c.signature.isV4() {
-		hashSHA256 = sha256.New()
-		hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
-	}
-
-	// Copies to input at writer.
-	size, err = io.CopyN(hashWriter, reader, partSize)
-	if err != nil {
-		// If not EOF return error right here.
-		if err != io.EOF {
-			return nil, nil, 0, err
-		}
-	}
-
-	// Seek back to beginning of input, any error fail right here.
-	if _, err := writer.Seek(0, 0); err != nil {
-		return nil, nil, 0, err
-	}
-
-	// Finalize md5shum and sha256 sum.
-	md5Sum = hashMD5.Sum(nil)
-	if c.signature.isV4() {
-		sha256Sum = hashSHA256.Sum(nil)
-	}
-	return md5Sum, sha256Sum, size, err
-}
-
 // Comprehensive put object operation involving multipart resumable uploads.
 //
 // Following code handles these types of readers.
@@ -130,7 +75,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 
 	// getUploadID for an object, initiates a new multipart request
 	// if it cannot find any previously partially uploaded object.
-	uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
 	if err != nil {
 		return 0, err
 	}
@@ -141,18 +86,19 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 	// Complete multipart upload.
 	var completeMultipartUpload completeMultipartUpload
 
-	// Fetch previously upload parts.
-	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
-	if err != nil {
-		return 0, err
-	}
 	// Previous maximum part size
 	var prevMaxPartSize int64
-	// Loop through all parts and calculate totalUploadedSize.
-	for _, partInfo := range partsInfo {
-		// Choose the maximum part size.
-		if partInfo.Size >= prevMaxPartSize {
-			prevMaxPartSize = partInfo.Size
+
+	// A map of all previously uploaded parts.
+	var partsInfo = make(map[int]objectPart)
+
+	// If this session is a continuation of a previous session fetch all
+	// previously uploaded parts info.
+	if !isNew {
+		// Fetch previously uploaded parts and maximum part size.
+		partsInfo, _, prevMaxPartSize, _, err = c.getPartsInfo(bucketName, objectName, uploadID)
+		if err != nil {
+			return 0, err
 		}
 	}
@@ -204,6 +150,9 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		// Close the temporary file.
 		tmpFile.Close()
 
+		// Save successfully uploaded size.
+		totalUploadedSize += size
+
 		// If read error was an EOF, break out of the loop.
 		if rErr == io.EOF {
 			break
@@ -223,8 +172,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		complPart.ETag = part.ETag
 		complPart.PartNumber = part.PartNumber
 		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
-		// Save successfully uploaded size.
-		totalUploadedSize += part.Size
 	}
 
 	// Verify if partNumber is different than total list of parts.
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go
index 6d1b0e1fe..058a0815e 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-readat.go
@@ -46,7 +46,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 
 	// Get upload id for an object, initiates a new multipart request
 	// if it cannot find any previously partially uploaded object.
-	uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
 	if err != nil {
 		return 0, err
 	}
@@ -57,25 +57,21 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 	// Complete multipart upload.
 	var completeMultipartUpload completeMultipartUpload
 
-	// Fetch previously upload parts.
-	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
-	if err != nil {
-		return 0, err
-	}
-
 	// Previous maximum part size
 	var prevMaxPartSize int64
+
 	// Previous part number.
 	var prevPartNumber int
-	// Loop through all parts and calculate totalUploadedSize.
-	for _, partInfo := range partsInfo {
-		totalUploadedSize += partInfo.Size
-		// Choose the maximum part size.
-		if partInfo.Size >= prevMaxPartSize {
-			prevMaxPartSize = partInfo.Size
+
+	// A map of all uploaded parts.
+	var partsInfo = make(map[int]objectPart)
+
+	// Fetch all parts info previously uploaded.
+	if !isNew {
+		partsInfo, totalUploadedSize, prevMaxPartSize, prevPartNumber, err = c.getPartsInfo(bucketName, objectName, uploadID)
+		if err != nil {
+			return 0, err
 		}
-		// Save previous part number.
-		prevPartNumber = partInfo.PartNumber
 	}
 
 	// Calculate the optimal part size for a given file size.
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go
index 51ba285c3..cb4c10c61 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v2_test.go
@@ -31,6 +31,90 @@ import (
 	"github.com/minio/minio-go"
 )
 
+func TestGetObjectClosedTwiceV2(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Connect and make sure bucket exists.
+	c, err := minio.New(
+		"s3.amazonaws.com",
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		false,
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "private", "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Generate data more than 32K
+	buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+	_, err = io.ReadFull(crand.Reader, buf)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if n != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+	if st.Size != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+			len(buf), st.Size)
+	}
+	if err := r.Close(); err != nil {
+		t.Fatal("Error:", err)
+	}
+	if err := r.Close(); err == nil {
+		t.Fatal("Error: object is already closed, should return error")
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
+
 // Tests removing partially uploaded objects.
 func TestRemovePartiallyUploadedV2(t *testing.T) {
 	if testing.Short() {
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go
index d452d8484..81c3de9e0 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_v4_test.go
@@ -55,6 +55,90 @@ func randString(n int, src rand.Source) string {
 	return string(b[0:30])
 }
 
+func TestGetObjectClosedTwice(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Connect and make sure bucket exists.
+	c, err := minio.New(
+		"s3.amazonaws.com",
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		false,
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "private", "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Generate data more than 32K
+	buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+	_, err = io.ReadFull(crand.Reader, buf)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if n != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+	if st.Size != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+			len(buf), st.Size)
+	}
+	if err := r.Close(); err != nil {
+		t.Fatal("Error:", err)
+	}
+	if err := r.Close(); err == nil {
+		t.Fatal("Error: object is already closed, should return error")
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
+
 // Tests removing partially uploaded objects.
 func TestRemovePartiallyUploaded(t *testing.T) {
 	if testing.Short() {

From c6e1696f0796035123745445a8434b203959b101 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sat, 9 Jan 2016 21:24:21 +0100
Subject: [PATCH 52/55] Fix debug message

---
 backend/s3/s3.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 8aa4f3065..87b41e72e 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -43,7 +43,7 @@ func Open(cfg Config) (backend.Backend, error) {
 	be.createConnections()
 
 	if err := client.BucketExists(cfg.Bucket); err != nil {
-		debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", err)
+		debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
 
 		// create new bucket with default ACL in default region
 		err = client.MakeBucket(cfg.Bucket, "", "")

From 1dd4c52a8b48b3d0c17ac06c1bb4eb14de984505 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sun, 17 Jan 2016 16:59:03 +0100
Subject: [PATCH 53/55] Add comments, configure flag library

---
 cmd/restic/global.go | 22 +++++++++++++++++-----
 cmd/restic/main.go   |  5 +++++
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/cmd/restic/global.go b/cmd/restic/global.go
index 2c73da530..5d9875fce 100644
--- a/cmd/restic/global.go
+++ b/cmd/restic/global.go
@@ -21,11 +21,13 @@ import (
 var version = "compiled manually"
 var compiledAt = "unknown time"
 
+// GlobalOptions holds all those options that can be set for every command.
 type GlobalOptions struct {
-	Repo     string `short:"r" long:"repo" description:"Repository directory to backup to/restore from"`
-	CacheDir string `          long:"cache-dir" description:"Directory to use as a local cache"`
-	Quiet    bool   `short:"q" long:"quiet" default:"false" description:"Do not output comprehensive progress report"`
-	NoLock   bool   `          long:"no-lock" default:"false" description:"Do not lock the repo, this allows some operations on read-only repos."`
+	Repo     string   `short:"r" long:"repo" description:"Repository directory to backup to/restore from"`
+	CacheDir string   `          long:"cache-dir" description:"Directory to use as a local cache"`
+	Quiet    bool     `short:"q" long:"quiet" default:"false" description:"Do not output comprehensive progress report"`
+	NoLock   bool     `          long:"no-lock" default:"false" description:"Do not lock the repo, this allows some operations on read-only repos."`
+	Options  []string `short:"o" long:"option" description:"Specify options in the form 'foo.key=value'"`
 
 	password string
 	stdout   io.Writer
@@ -33,8 +35,9 @@ type GlobalOptions struct {
 }
 
 var globalOpts = GlobalOptions{stdout: os.Stdout, stderr: os.Stderr}
-var parser = flags.NewParser(&globalOpts, flags.Default)
+var parser = flags.NewParser(&globalOpts, flags.HelpFlag|flags.PassDoubleDash)
 
+// Printf writes the message to the configured stdout stream.
 func (o GlobalOptions) Printf(format string, args ...interface{}) {
 	_, err := fmt.Fprintf(o.stdout, format, args...)
 	if err != nil {
@@ -43,6 +46,7 @@ func (o GlobalOptions) Printf(format string, args ...interface{}) {
 	}
 }
 
+// Verbosef calls Printf to write the message when the verbose flag is set.
 func (o GlobalOptions) Verbosef(format string, args ...interface{}) {
 	if o.Quiet {
 		return
@@ -51,6 +55,8 @@ func (o GlobalOptions) Verbosef(format string, args ...interface{}) {
 	o.Printf(format, args...)
 }
 
+// ShowProgress returns true iff the progress status should be written, i.e.
+// the quiet flag is not set and the output is a terminal.
 func (o GlobalOptions) ShowProgress() bool {
 	if o.Quiet {
 		return false
@@ -63,6 +69,7 @@ func (o GlobalOptions) ShowProgress() bool {
 	return true
 }
 
+// Warnf writes the message to the configured stderr stream.
 func (o GlobalOptions) Warnf(format string, args ...interface{}) {
 	_, err := fmt.Fprintf(o.stderr, format, args...)
 	if err != nil {
@@ -71,6 +78,7 @@ func (o GlobalOptions) Warnf(format string, args ...interface{}) {
 	}
 }
 
+// Exitf uses Warnf to write the message and then calls os.Exit(exitcode).
 func (o GlobalOptions) Exitf(exitcode int, format string, args ...interface{}) {
 	if format[len(format)-1] != '\n' {
 		format += "\n"
@@ -108,6 +116,7 @@ func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password s
 	return password, nil
 }
 
+// ReadPassword reads the password from stdin.
 func (o GlobalOptions) ReadPassword(prompt string) string {
 	var (
 		password string
@@ -131,6 +140,8 @@ func (o GlobalOptions) ReadPassword(prompt string) string {
 	return password
 }
 
+// ReadPasswordTwice calls ReadPassword two times and returns an error when the
+// passwords don't match.
 func (o GlobalOptions) ReadPasswordTwice(prompt1, prompt2 string) string {
 	pw1 := o.ReadPassword(prompt1)
 	pw2 := o.ReadPassword(prompt2)
@@ -141,6 +152,7 @@ func (o GlobalOptions) ReadPasswordTwice(prompt1, prompt2 string) string {
 	return pw1
 }
 
+// OpenRepository reads the password and opens the repository.
 func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
 	if o.Repo == "" {
 		return nil, errors.New("Please specify repository location (-r)")
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index a11bab84f..32598fe23 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -25,9 +25,14 @@ func main() {
 	_, err := parser.Parse()
 	if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
+		parser.WriteHelp(os.Stdout)
 		os.Exit(0)
 	}
 
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+	}
+
 	if restic.IsAlreadyLocked(err) {
 		fmt.Fprintf(os.Stderr, "\nthe `unlock` command can be used to remove stale locks\n")
 	}

From 877f3f61a0408c05267b32cfae4130e5eb6154da Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sun, 17 Jan 2016 18:33:03 +0100
Subject: [PATCH 54/55] Add flag to disable cross-compilation

---
 run_integration_tests.go | 64 +++++++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 27 deletions(-)

diff --git a/run_integration_tests.go b/run_integration_tests.go
index 700f1d032..5ea7957bc 100644
--- a/run_integration_tests.go
+++ b/run_integration_tests.go
@@ -4,6 +4,7 @@ package main
 
 import (
 	"bytes"
+	"flag"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -15,6 +16,12 @@ import (
 	"sync"
 )
 
+var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
+
+func init() {
+	flag.Parse()
+}
+
 type CIEnvironment interface {
 	Prepare()
 	RunTests()
@@ -35,7 +42,6 @@ func (env *TravisEnvironment) Prepare() {
 	run("go", "get", "golang.org/x/tools/cmd/cover")
 	run("go", "get", "github.com/mattn/goveralls")
 	run("go", "get", "github.com/pierrre/gotestcover")
-	run("go", "get", "github.com/mitchellh/gox")
 	runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio")
 
 	if runtime.GOOS == "darwin" {
@@ -45,26 +51,29 @@ func (env *TravisEnvironment) Prepare() {
 		run("brew", "cask", "install", "osxfuse")
 	}
 
-	// only test cross compilation on linux with Travis
-	if runtime.GOOS == "linux" {
-		env.goxArch = []string{"386", "amd64"}
-		if !strings.HasPrefix(runtime.Version(), "go1.3") {
-			env.goxArch = append(env.goxArch, "arm")
-		}
-
-		env.goxOS = []string{"linux", "darwin", "freebsd", "openbsd", "windows"}
-	} else {
-		env.goxArch = []string{runtime.GOARCH}
-		env.goxOS = []string{runtime.GOOS}
-	}
-
-	msg("gox: OS %v, ARCH %v\n", env.goxOS, env.goxArch)
-
-	v := runtime.Version()
-	if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
-		run("gox", "-build-toolchain",
-			"-os", strings.Join(env.goxOS, " "),
-			"-arch", strings.Join(env.goxArch, " "))
+	if *runCrossCompile {
+		// only test cross compilation on linux with Travis
+		run("go", "get", "github.com/mitchellh/gox")
+		if runtime.GOOS == "linux" {
+			env.goxArch = []string{"386", "amd64"}
+			if !strings.HasPrefix(runtime.Version(), "go1.3") {
+				env.goxArch = append(env.goxArch, "arm")
+			}
+
+			env.goxOS = []string{"linux", "darwin", "freebsd", "openbsd", "windows"}
+		} else {
+			env.goxArch = []string{runtime.GOARCH}
+			env.goxOS = []string{runtime.GOOS}
+		}
+
+		msg("gox: OS %v, ARCH %v\n", env.goxOS, env.goxArch)
+
+		v := runtime.Version()
+		if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
+			run("gox", "-build-toolchain",
+				"-os", strings.Join(env.goxOS, " "),
+				"-arch", strings.Join(env.goxArch, " "))
+		}
 	}
 }
@@ -95,14 +104,16 @@ func (env *TravisEnvironment) RunTests() {
 		os.Setenv("RESTIC_TEST_FUSE", "0")
 	}
 
-	// compile for all target architectures with tags
-	for _, tags := range []string{"release", "debug"} {
-		run("gox", "-verbose",
-			"-os", strings.Join(env.goxOS, " "),
-			"-arch", strings.Join(env.goxArch, " "),
-			"-tags", tags,
-			"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
-			"./cmd/restic")
+	if *runCrossCompile {
+		// compile for all target architectures with tags
+		for _, tags := range []string{"release", "debug"} {
+			run("gox", "-verbose",
+				"-os", strings.Join(env.goxOS, " "),
+				"-arch", strings.Join(env.goxArch, " "),
+				"-tags", tags,
+				"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
+				"./cmd/restic")
+		}
 	}
 
 	// run the build script
@@ -136,7 +147,6 @@ type AppveyorEnvironment struct{}
 
 func (env *AppveyorEnvironment) Prepare() {
 	msg("preparing environment for Appveyor CI\n")
-	runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio")
 }
 
 func (env *AppveyorEnvironment) RunTests() {

From c722851f92ec15b4d57af98644ed40c82cabfa12 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Sun, 17 Jan 2016 18:50:50 +0100
Subject: [PATCH 55/55] Update Dockerfile

---
 Dockerfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index e2810584d..03d8c9628 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,7 @@
 
 FROM ubuntu:14.04
 
-ARG GOVERSION=1.5.2
+ARG GOVERSION=1.5.3
 ARG GOARCH=amd64
 
 # install dependencies
@@ -45,6 +45,7 @@ RUN mkdir -p $GOPATH/src/github.com/restic/restic
 RUN go get golang.org/x/tools/cmd/cover
 RUN go get github.com/mattn/goveralls
 RUN go get github.com/mitchellh/gox
+RUN go get github.com/pierrre/gotestcover
 RUN GO15VENDOREXPERIMENT=1 go get github.com/minio/minio
 
 # set TRAVIS_BUILD_DIR for integration script